| Column | Type / range |
|---|---|
| max_stars_repo_path | string, length 4–286 |
| max_stars_repo_name | string, length 5–119 |
| max_stars_count | int64, 0–191k |
| id | string, length 1–7 |
| content | string, length 6–1.03M |
| content_cleaned | string, length 6–1.03M |
| language | string, 111 classes |
| language_score | float64, 0.03–1 |
| comments | string, length 0–556k |
| edu_score | float64, 0.32–5.03 |
| edu_int_score | int64, 0–5 |
data_processing/process_xls.py | luisroel91/libdib_assesment | 0 | 8300

import pandas as pd
# Define our header
col_names = [
"year",
"num_males_with_income",
"male_median_income_curr_dollars",
"male_median_income_2019_dollars",
"num_females_with_income",
"female_median_income_curr_dollars",
"female_median_income_2019_dollars",
]
# Load Asian census data XLS, skipping all headers
dfa = pd.read_excel(
r'p08a.xlsx',
skiprows=8,
# Make sure PD doesn't use header row for our DF
header=None,
# Define col names
names=col_names,
)
# Load White census data XLS, skipping all headers
dfw = pd.read_excel(
r'p08w.xlsx',
skiprows=8,
# Make sure PD doesn't use header row for our DF
header=None,
    # Define col names
names=col_names
)
# Splinter off rows into age group DFs for both sets of data
# (explicit .copy() so the inserts below work on independent frames rather than views)
dfa1524 = dfa.iloc[:20].copy()
dfa2534 = dfa.iloc[25:45].copy()
dfa3544 = dfa.iloc[50:70].copy()
dfa4554 = dfa.iloc[75:95].copy()
dfa5564 = dfa.iloc[100:120].copy()
dfa6574 = dfa.iloc[125:145].copy()
dfa75 = dfa.iloc[150:170].copy()
dfw1524 = dfw.iloc[:20].copy()
dfw2534 = dfw.iloc[25:45].copy()
dfw3544 = dfw.iloc[50:70].copy()
dfw4554 = dfw.iloc[75:95].copy()
dfw5564 = dfw.iloc[100:120].copy()
dfw6574 = dfw.iloc[125:145].copy()
dfw75 = dfw.iloc[150:170].copy()
# Add Age Range col to each DF
dfa1524.insert(0, 'age_range', '15-24')
dfa2534.insert(0, 'age_range', '25-34')
dfa3544.insert(0, 'age_range', '35-44')
dfa4554.insert(0, 'age_range', '45-54')
dfa5564.insert(0, 'age_range', '55-64')
dfa6574.insert(0, 'age_range', '65-74')
dfa75.insert(0, 'age_range', 'Over 75')
dfw1524.insert(0, 'age_range', '15-24')
dfw2534.insert(0, 'age_range', '25-34')
dfw3544.insert(0, 'age_range', '35-44')
dfw4554.insert(0, 'age_range', '45-54')
dfw5564.insert(0, 'age_range', '55-64')
dfw6574.insert(0, 'age_range', '65-74')
dfw75.insert(0, 'age_range', 'Over 75')
# Stack cleaned DF's vertically
dfa = pd.concat([
dfa1524,
dfa2534,
dfa3544,
dfa4554,
dfa5564,
dfa6574,
dfa75
], axis=0)
dfw = pd.concat([
dfw1524,
dfw2534,
dfw3544,
dfw4554,
dfw5564,
dfw6574,
dfw75
], axis=0)
# Add Race col
dfa.insert(0, 'race', 'asian')
dfw.insert(0, 'race', 'white')
# Clean garbage chars in Year col using regex
dfa['year'] = dfa['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
dfw['year'] = dfw['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
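# e.g. a footnoted year cell such as "2013 (39)" (cell format assumed from the pattern) becomes "2013"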
# Stack our cleaned + normalized data into a single DF
df = pd.concat([
dfa,
dfw
], axis=0)
# Convert the DF col types to conform to our CensusRecord model
df = df.astype({
"race": str,
"age_range": str,
"year": int,
"num_males_with_income": int,
"male_median_income_curr_dollars": float,
"male_median_income_2019_dollars": float,
"num_females_with_income": int,
"female_median_income_curr_dollars": float,
"female_median_income_2019_dollars": float,
})
# Pickle the DF
df.to_pickle("./res.pkl")
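
A quick read-back check of the pickled result (a minimal sketch; it assumes the script above has just written res.pkl to the working directory):

import pandas as pd

df = pd.read_pickle("./res.pkl")
# race/age_range come back as object, counts as int64, incomes as float64
print(df.dtypes)
print(df["race"].value_counts())
print(df.groupby(["race", "age_range"]).size())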
Section_1/Exercise_16.py | Szymon-Budziak/WDI_exercises_solutions | 0 | 8301

"""
A sequence is defined by the formula: A[n+1] = (A[n] % 2) * (3 * A[n] + 1) + (1 - A[n] % 2) * A[n] / 2.
Starting from any natural number > 1, the sequence eventually reaches the value 1. Write a program
that finds the starting term in the range 2-10000 for which the value 1 is reached after the largest
number of steps.
"""
m = 1
best_start = 2
for start in range(2, 10000):
    a0 = start
    n = 0
    while a0 != 1:
        # // keeps the arithmetic in integers (with / the terms silently become floats)
        a0 = ((a0 % 2) * (3 * a0 + 1)) + ((1 - (a0 % 2)) * (a0 // 2))
        n += 1
    if n > m:
        m = n
        best_start = start
# Report the starting term (what the exercise asks for) together with its step count
print(best_start, m)
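
As a quick sanity check of the recurrence itself (the helper name and test value are illustrative, not part of the original exercise): starting from 6 the chain is 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. eight steps.

def steps_to_one(a):
    """Count iterations of A[n+1] = (A[n] % 2)*(3*A[n] + 1) + (1 - A[n] % 2)*(A[n] // 2) until the value is 1."""
    n = 0
    while a != 1:
        a = (a % 2) * (3 * a + 1) + (1 - a % 2) * (a // 2)
        n += 1
    return n

assert steps_to_one(6) == 8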
| """
Dany jest ciąg określony wzorem: A[n+1] = (A[n] % 2) ∗ (3 ∗ A[n] + 1) + (1 − A[n] % 2) ∗ A[n] / 2.
Startując z dowolnej liczby naturalnej > 1 ciąg ten osiąga wartość 1. Napisać program, który
znajdzie wyraz początkowy z przedziału 2-10000 dla którego wartość 1 jest osiągalna po największej
liczbie kroków.
"""
a0 = 2
m = 1
for a0 in range(2, 10000):
n = 0
while a0 != 1:
a0 = (((a0 % 2) * (3 * a0 + 1)) + ((1 - (a0 % 2)) * (a0 / 2)))
n += 1
if n > m:
m = n
a0 += 1
print(m)
SysPy_ver/funcs/_var_declaration.py | evlog/SysPy | 4 | 8302

"""
*****************************************************************************
*
H E A D E R I N F O R M A T I O N *
*
*****************************************************************************
Project Name: SysPy (System Python)
http://cgi.di.uoa.gr/~evlog/syspy.html
File Name: _var_declaration.py
Created by: <NAME>
*****************************************************************************
*
C O P Y R I G H T N O T I C E *
*
*****************************************************************************
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2.1 of the License, a copy of which is available from
http://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
*****************************************************************************
*
D E S C R I P T I O N *
*
*****************************************************************************
Variable declaration when a variable assignment is tracked.
"""
from pdb import *
def var_declaration(assign_lines_count, token_struct, assign_lines, signals, process_vars):
"""
FUNCTION: var_declaration(a int, b(), c[], d[], e[])
a: assign lines counter integer
b: token's tupple
c: list containing the VHDL code
d: list containing the signal statements
e: list containing
Variable declaration when a variable assignment is tracked.
"""
    # Python's variable declarations
#----------------------------------------------------------------------------------------------------------------------------------
count0 = 0
count1 = 0
process_vars_d = []
vars0 = []
var0 = ''
var1 = ''
#----------------------------------------------------------------------------------------------------------------------------------
print("process_vars:", process_vars)
# Erasing duplicated registrations in "process_vars[]"
#----------------------------------------------------------------------------------------------------------------------------------
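    # Each entry contributes up to three component names (collected in vars0 below).
    # flag_process_vars is a bitmask: bits 0/1/2 are set when vars0[0]/[1]/[2] is already
    # present in process_vars_d, so only the still-missing names are appended again.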
for i in range(len(process_vars)):
vars0 = []
#flag_process_vars = 0
if ((process_vars[i][0] == "name_left") or (process_vars[i][0] == "name_right")):
var0 = process_vars[i][1].replace('=', '')
var0 = var0.replace('! ', '')
var0 = var0.replace('>', '')
var0 = var0.replace('<', '')
var0 = var0.replace(' ', '')
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice_var0"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice_var1"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice_var01"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_item"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_item_var"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item_var0"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item_var1"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item_var01"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var0"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var1"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var2"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][3]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var01"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var02"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][3]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var12"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
var0 = process_vars[i][1][3]
vars0.append(var0)
flag_process_vars = 0
for n in range(0, len(vars0)):
for j in range(len(process_vars_d)):
if ((process_vars_d[j][0] == "name_left") or (process_vars_d[j][0] == "name_right")):
var1 = process_vars_d[j][1].replace('=', '')
var1 = var1.replace('! ', '')
var1 = var1.replace('>', '')
var1 = var1.replace('<', '')
var1 = var1.replace(' ', '')
elif (process_vars_d[j][0] == "name_right_binary_slice"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_binary_slice_var0"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_binary_slice_var1"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_binary_slice_var01"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_item"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_item_var"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_item"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_array_binary_item_var0"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_item_var1"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_item_var01"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var0"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var1"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var2"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var01"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var02"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var12"):
var1 = process_vars_d[j][1]
if (vars0[n] == var1):
if (n == 0):
flag_process_vars += 1
if (n == 1):
flag_process_vars += 2
if (n == 2):
flag_process_vars += 4
if ((process_vars[i][0] == "name_left") or (process_vars[i][0] == "name_right")):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_binary_slice"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_binary_slice_var0"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_binary_slice_var1"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][0]])
elif (flag_process_vars == 4):
pass
elif (process_vars[i][0] == "name_right_binary_slice_var01"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_item"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_item_var"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_item_var", process_vars[i][1][0]])
process_vars_d.append(["name_right_item_var", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_item_var", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_item_var", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_item"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_array_binary_item_var0"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_item_var1"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_item_var01"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_array_binary_slice_var0"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var1"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var2"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var01"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var02"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var12"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
process_vars = process_vars_d
#----------------------------------------------------------------------------------------------------------------------------------
j = assign_lines_count
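    # For every deduplicated variable, the loop below walks j backwards from the current
    # assignment line to the enclosing process ("process_sens_list" token), strips its trailing
    # "begin", emits the VHDL variable/type declarations there and re-appends "begin" at the end.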
for m in range(0, len(process_vars)):
if ((process_vars[m][0] == "name_left") or (process_vars[m][0] == "name_right")):
t = process_vars[m][1].replace('=', '')
t = t.replace(' ', '')
elif (process_vars[m][0] == "name_right_binary_slice"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_binary_slice_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_binary_slice_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_binary_slice_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_item"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_item_var"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_array_binary_item_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_array_binary_slice_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var2"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var02"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var12"):
t = process_vars[m][1]
for i in range (0, len(signals)):
if (t == signals[i]['N']):
if (signals[i]['D'] == 'v'):
L = signals[i]['L'].__doc__
n = signals[i]['N'].__doc__
if (m == 0):
sp = ''
while 1:
if (assign_lines[j][0] == "process_sens_list"):
assign_lines[j][0] = assign_lines[j][0] + "_var"
for k in range(0, assign_lines[j][4]):
sp = sp + ' '
assign_lines[j][1] = assign_lines[j][1].replace("begin", '')
assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "-- Variables"
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "-------------------------------------------------------------------"
if (signals[i]['T'] == 'b'):
if (L.find("int") == 0):
if (n.find("list") == 0):
for k in range(len(signals_intr[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (L.find("list") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals_intr[i]['L'][0] > signals_intr[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
break
elif (signals[i]['T'] == "int"):
if (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals_intr[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
break
elif (signals[i]['T'] == "arrb"):
if (n.find("str") == 0):
if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " downto " + str(signals_intr[i]['L'][1][1]) + ");\n"
elif (signals[i]['L'][1][0] < signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " to " + str(signals_intr[i]['L'][1][1]) + ");\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": \"" + signals[i]['V'] + "\";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": {"
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\"};\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
count0 = count0 + 1
break
elif (signals[i]['T'] == "arri"):
if (n.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": " + str(signals[i]['V']) + ";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": {"
for k in range(0, (signals_intr[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + "};\n"
elif (j != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
count0 = count0 + 1
break
elif (signals[i]['T'] == 's'):
v = signals[i]['V'].__doc__
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_type" + str(count1) + " is ("
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
elif (v.find("list") == 0):
for k in range(len(signals[i]['V'])):
if (k == (len(signals[i]['V']) - 1)):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + args[i]['N'] + ": state_type" + str(count1) + ";\n"
count1 = count1 + 1
break
elif (j == 0):
break
j = j - 1
elif (m != 0):
if (signals[i]['T'] == 'b'):
if (L.find("int") == 0):
if (n.find("list") == 0):
for k in range(len(signals_intr[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (L.find("list") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals_intr[i]['L'][0] > signals_intr[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
elif (signals[i]['T'] == "int"):
if (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals_intr[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (signals[i]['T'] == "arrb"):
if (n.find("str") == 0):
if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals[i]['L'][1][0]) + " downto " + str(signals[i]['L'][1][1]) + ");\n"
elif (signals[i]['L'][1][0] < signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " to " + str(signals_intr[i]['L'][1][1]) + ");\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": \"" + signals[i]['V'] + "\";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": {"
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\"};\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
count0 = count0 + 1
elif (signals[i]['T'] == "arri"):
if (n.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": " + str(signals[i]['V']) + ";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": {"
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + "};\n"
elif (j != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + ", "
count0 = count0 + 1
elif (signals[i]['T'] == 's'):
v = signals[i]['V'].__doc__
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_typev" + str(count1) + " is ("
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
elif (v.find("list") == 0):
for k in range(len(signals[i]['V'])):
if (k == (len(signals[i]['V']) - 1)):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + args[i]['N'] + ": state_typev" + str(count1) + ";\n"
count1 = count1 + 1
if (len(process_vars) > 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "-------------------------------------------------------------------"
assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "begin\n\n"
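
A minimal sketch of how the function is driven, with input shapes inferred from the accesses above; the concrete field values here are assumptions, and the .has_key() calls tie the module to Python 2:

# Hypothetical inputs: one single-bit variable "tmp" assigned inside a process
signals = [{'N': 'tmp', 'D': 'v', 'T': 'b', 'L': 1}]
process_vars = [["name_left", "tmp ="]]
assign_lines = [["process_sens_list", "process (clk)\nbegin", None, None, 2]]

var_declaration(0, (), assign_lines, signals, process_vars)
print(assign_lines[0][1])   # now declares "variable tmp: std_logic;" ahead of the process "begin"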
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var02"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var12"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
process_vars = process_vars_d
#----------------------------------------------------------------------------------------------------------------------------------
j = assign_lines_count
for m in range(0, len(process_vars)):
if ((process_vars[m][0] == "name_left") or (process_vars[m][0] == "name_right")):
t = process_vars[m][1].replace('=', '')
t = t.replace(' ', '')
elif (process_vars[m][0] == "name_right_binary_slice"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_binary_slice_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_binary_slice_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_binary_slice_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_item"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_item_var"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_array_binary_item_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_array_binary_slice_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var2"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var02"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var12"):
t = process_vars[m][1]
for i in range (0, len(signals)):
if (t == signals[i]['N']):
if (signals[i]['D'] == 'v'):
L = signals[i]['L'].__doc__
n = signals[i]['N'].__doc__
if (m == 0):
sp = ''
while 1:
if (assign_lines[j][0] == "process_sens_list"):
assign_lines[j][0] = assign_lines[j][0] + "_var"
for k in range(0, assign_lines[j][4]):
sp = sp + ' '
assign_lines[j][1] = assign_lines[j][1].replace("begin", '')
assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "-- Variables"
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "-------------------------------------------------------------------"
if (signals[i]['T'] == 'b'):
if (L.find("int") == 0):
if (n.find("list") == 0):
for k in range(len(signals_intr[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (L.find("list") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals_intr[i]['L'][0] > signals_intr[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
break
elif (signals[i]['T'] == "int"):
if (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals_intr[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
break
elif (signals[i]['T'] == "arrb"):
if (n.find("str") == 0):
if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " downto " + str(signals_intr[i]['L'][1][1]) + ");\n"
elif (signals[i]['L'][1][0] < signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " to " + str(signals_intr[i]['L'][1][1]) + ");\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": \"" + signals[i]['V'] + "\";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": {"
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\"};\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
count0 = count0 + 1
break
elif (signals[i]['T'] == "arri"):
if (n.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": " + str(signals[i]['V']) + ";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ": {"
for k in range(0, (signals_intr[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + "};\n"
                                                    elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
count0 = count0 + 1
break
elif (signals[i]['T'] == 's'):
v = signals[i]['V'].__doc__
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_type" + str(count1) + " is ("
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
elif (v.find("list") == 0):
for k in range(len(signals[i]['V'])):
if (k == (len(signals[i]['V']) - 1)):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + args[i]['N'] + ": state_type" + str(count1) + ";\n"
count1 = count1 + 1
break
elif (j == 0):
break
j = j - 1
elif (m != 0):
if (signals[i]['T'] == 'b'):
if (L.find("int") == 0):
if (n.find("list") == 0):
for k in range(len(signals_intr[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (L.find("list") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals_intr[i]['L'][0] > signals_intr[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := '" + signals[i]['V'] + "';\n"
elif (signals[i]['T'] == "int"):
if (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals_intr[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (signals[i]['T'] == "arrb"):
if (n.find("str") == 0):
if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals[i]['L'][1][0]) + " downto " + str(signals[i]['L'][1][1]) + ");\n"
elif (signals[i]['L'][1][0] < signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals_intr[i]['L'][1][0]) + " to " + str(signals_intr[i]['L'][1][1]) + ");\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": \"" + signals[i]['V'] + "\";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": {"
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\"};\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
count0 = count0 + 1
elif (signals[i]['T'] == "arri"):
if (n.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": " + str(signals[i]['V']) + ";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ": {"
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + "};\n"
                                            elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + ", "
count0 = count0 + 1
elif (signals[i]['T'] == 's'):
v = signals[i]['V'].__doc__
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_typev" + str(count1) + " is ("
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
elif (v.find("list") == 0):
for k in range(len(signals[i]['V'])):
if (k == (len(signals[i]['V']) - 1)):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + args[i]['N'] + ": state_typev" + str(count1) + ";\n"
count1 = count1 + 1
if (len(process_vars) > 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "-------------------------------------------------------------------"
assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "begin\n\n"
| en | 0.531553 | ***************************************************************************** * H E A D E R I N F O R M A T I O N * * ***************************************************************************** Project Name: SysPy (System Python) http://cgi.di.uoa.gr/~evlog/syspy.html File Name: _var_declaration.py Created by: <NAME> ***************************************************************************** * C O P Y R I G H T N O T I C E * * ***************************************************************************** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; version 2.1 of the License, a copy of which is available from http://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ***************************************************************************** * D E S C R I P T I O N * * ***************************************************************************** Variable declaration when a variable assignment is tracked. FUNCTION: var_declaration(a int, b(), c[], d[], e[]) a: assign lines counter integer b: token's tupple c: list containing the VHDL code d: list containing the signal statements e: list containing Variable declaration when a variable assignment is tracked. # Python's variable declerations #---------------------------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------------------------------------------- # Erasing duplicated registrations in "process_vars[]" #---------------------------------------------------------------------------------------------------------------------------------- #flag_process_vars = 0 #---------------------------------------------------------------------------------------------------------------------------------- | 1.561566 | 2 |
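
The long flag cascade in the SysPy snippet above prunes already-declared variables: each process_vars entry carries a list of names, and flag_process_vars is read as a bit mask in which bit i set means element i of that list has already been handled and is dropped. A minimal standalone sketch of that selection rule, using a hypothetical entry rather than SysPy's real token tuples:

def keep_undeclared(kind, names, flag):
    # Keep names[i] only when bit i of flag is clear (i.e. not yet declared).
    kept = []
    for i, name in enumerate(names):
        if not (flag >> i) & 1:
            kept.append([kind, name])
    return kept

# keep_undeclared("name_right_binary_slice_var01", ["a", "b", "c"], 0b101)
# -> [["name_right_binary_slice_var01", "b"]], matching the flag == 5 branch above.
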
Giraffe/Functions.py | MaggieIllustrations/softuni-github-programming | 0 | 8303 | def say_hi(name,age):
print("Hello " + name + ", you are " + age)
say_hi("Mike", "35")
def cube(num): # function
return num*num*num
result = cube(4) # variable
print(result)
| def say_hi(name,age):
print("Hello " + name + ", you are " + age)
say_hi("Mike", "35")
def cube(num): # function
return num*num*num
result = cube(4) # variable
print(result)
| en | 0.193963 | # function # variable | 3.507782 | 4 |
airspace_surgery.py | wipfli/airspaces | 1 | 8304 | <gh_stars>1-10
import glob
import json
path_in = './airspaces/'
path_out = './airspaces_processed/'
filenames = [path.split('/')[-1] for path in glob.glob(path_in + '*')]
remove = {
'france_fr.geojson': [
314327,
314187,
314360,
314359,
314362,
314361,
314364,
314363,
314333,
314329,
314331,
],
'germany_de.geojson': [
307563,
307638,
307639,
307640,
]
}
replacements = {
'france_fr.geojson': [
['Bale10 119.35', 'Bale 10 TMA 130.9'],
['Bale1 119.35', 'Bale 1 TMA 130.9'],
['Bale2 119.35', 'Bale 2 TMA 130.9'],
['Bale3 119.35', 'Bale 3 TMA 130.9'],
['Bale4 119.35', 'Bale 4 TMA 130.9'],
['Bale5 119.35', 'Bale 5 TMA 130.9'],
['Bale5 119.35', 'Bale 5 TMA 130.9'],
['Bale6 119.35', 'Bale 6 TMA 130.9'],
['Bale7 119.35', 'Bale 7 TMA 130.9'],
['Bale8 119.35', 'Bale 8 TMA 130.9'],
['Bale9 119.35', 'Bale 9 TMA 130.9'],
['Bale AZ4T1 134.67', 'Bale T1 TMA HX 134.68'],
['Bale AZ4T2 134.67', 'Bale T2 TMA HX 134.68'],
['Bale AZ4T3 134.67', 'Bale T3 TMA HX 134.68'],
['CTR BALE', 'Bale CTR 118.3']
],
'switzerland_ch.geojson': [
['ZURICH 10 TMA 118.1', 'ZURICH 10 TMA 124.7'],
['ZURICH 11 TMA 118.1', 'ZURICH 11 TMA 124.7'],
['ZURICH 12 TMA 118.1', 'ZURICH 12 TMA 124.7'],
['ZURICH 13 TMA 118.1', 'ZURICH 13 TMA 124.7'],
['ZURICH 14 TMA 118.1', 'ZURICH 14 TMA HX 127.755'],
['ZURICH 15 TMA 118.1', 'ZURICH 15 TMA HX 127.755'],
['ZURICH 1 TMA 118.1', 'ZURICH 1 TMA 124.7'],
['ZURICH 2 CTR 118.1', 'ZURICH 2 CTR HX 118.975'],
['ZURICH 2 TMA 118.1', 'ZURICH 2 TMA 124.7'],
['ZURICH 3 TMA 118.1', 'ZURICH 3 TMA 124.7'],
['ZURICH 4A TMA 118.1', 'ZURICH 4A TMA 124.7'],
['ZURICH 4B TMA 118.1', 'ZURICH 4B TMA 124.7'],
['ZURICH 4C TMA 118.1', 'ZURICH 4C TMA 124.7'],
['ZURICH 5 TMA 118.1', 'ZURICH 5 TMA 124.7'],
['ZURICH 6 TMA 118.1', 'ZURICH 6 TMA 124.7'],
['ZURICH 7 TMA 118.1', 'ZURICH 7 TMA 124.7'],
['ZURICH 8 TMA 118.1', 'ZURICH 8 TMA 124.7'],
['ZURICH 9 TMA 118.1', 'ZURICH 9 TMA 124.7'],
['BERN 1 TMA 121.025', 'BERN 1 TMA HX 127.325'],
['BERN 2 TMA 121.025', 'BERN 2 TMA HX 127.325'],
['BERN CTR 121.025', 'BERN CTR HX 121.025'],
['EMMEN 1 CTR 120.425', 'EMMEN 1 CTR HX 120.425'],
['EMMEN 1 TMA 120.425', 'EMMEN 1 TMA HX 134.130'],
['EMMEN 2 CTR 120.425', 'EMMEN 2 CTR HX 120.425'],
['EMMEN 2 TMA 120.425', 'EMMEN 2 TMA HX 134.130'],
['EMMEN 3 TMA 120.425', 'EMMEN 3 TMA HX 134.130'],
['EMMEN 4 TMA 120.425', 'EMMEN 4 TMA HX 134.130'],
['EMMEN 5 TMA 120.425', 'EMMEN 5 TMA HX 134.130'],
['EMMEN 6 TMA 120.425', 'EMMEN 6 TMA HX 134.130'],
]
}
for filename in filenames:
print(filename)
with open(path_in + filename) as f:
data = json.load(f)
if filename in replacements:
targets = [r[0] for r in replacements[filename]]
for feature in data['features']:
if feature['properties']['N'] in targets:
print('replace ' + feature['properties']['N'] + '...')
feature['properties']['N'] = next(x for x in replacements[filename] if x[0] == feature['properties']['N'])[1]
if filename in remove:
features_out = [f for f in data['features'] if int(f['properties']['ID']) not in remove[filename]]
else:
features_out = data['features']
print('removed ' + str(len(data['features']) - len(features_out)) + ' features')
geojson = {
'type': 'FeatureCollection',
'features': features_out
}
print('write ' + filename + '...')
with open(path_out + filename, 'w') as f:
json.dump(geojson, f)
all_features = []
for filename in filenames:
print('read ' + filename + '...')
with open(path_out + filename) as f:
all_features += json.load(f)['features']
print('write airspaces.geojson...')
with open('airspaces.geojson', 'w') as f:
json.dump({
'type': 'FeatureCollection',
'features': all_features
}, f)
print('done')
| import glob
import json
path_in = './airspaces/'
path_out = './airspaces_processed/'
filenames = [path.split('/')[-1] for path in glob.glob(path_in + '*')]
remove = {
'france_fr.geojson': [
314327,
314187,
314360,
314359,
314362,
314361,
314364,
314363,
314333,
314329,
314331,
],
'germany_de.geojson': [
307563,
307638,
307639,
307640,
]
}
replacements = {
'france_fr.geojson': [
['Bale10 119.35', 'Bale 10 TMA 130.9'],
['Bale1 119.35', 'Bale 1 TMA 130.9'],
['Bale2 119.35', 'Bale 2 TMA 130.9'],
['Bale3 119.35', 'Bale 3 TMA 130.9'],
['Bale4 119.35', 'Bale 4 TMA 130.9'],
['Bale5 119.35', 'Bale 5 TMA 130.9'],
['Bale5 119.35', 'Bale 5 TMA 130.9'],
['Bale6 119.35', 'Bale 6 TMA 130.9'],
['Bale7 119.35', 'Bale 7 TMA 130.9'],
['Bale8 119.35', 'Bale 8 TMA 130.9'],
['Bale9 119.35', 'Bale 9 TMA 130.9'],
['Bale AZ4T1 134.67', 'Bale T1 TMA HX 134.68'],
['Bale AZ4T2 134.67', 'Bale T2 TMA HX 134.68'],
['Bale AZ4T3 134.67', 'Bale T3 TMA HX 134.68'],
['CTR BALE', 'Bale CTR 118.3']
],
'switzerland_ch.geojson': [
['ZURICH 10 TMA 118.1', 'ZURICH 10 TMA 124.7'],
['ZURICH 11 TMA 118.1', 'ZURICH 11 TMA 124.7'],
['ZURICH 12 TMA 118.1', 'ZURICH 12 TMA 124.7'],
['ZURICH 13 TMA 118.1', 'ZURICH 13 TMA 124.7'],
['ZURICH 14 TMA 118.1', 'ZURICH 14 TMA HX 127.755'],
['ZURICH 15 TMA 118.1', 'ZURICH 15 TMA HX 127.755'],
['ZURICH 1 TMA 118.1', 'ZURICH 1 TMA 124.7'],
['ZURICH 2 CTR 118.1', 'ZURICH 2 CTR HX 118.975'],
['ZURICH 2 TMA 118.1', 'ZURICH 2 TMA 124.7'],
['ZURICH 3 TMA 118.1', 'ZURICH 3 TMA 124.7'],
['ZURICH 4A TMA 118.1', 'ZURICH 4A TMA 124.7'],
['ZURICH 4B TMA 118.1', 'ZURICH 4B TMA 124.7'],
['ZURICH 4C TMA 118.1', 'ZURICH 4C TMA 124.7'],
['ZURICH 5 TMA 118.1', 'ZURICH 5 TMA 124.7'],
['ZURICH 6 TMA 118.1', 'ZURICH 6 TMA 124.7'],
['ZURICH 7 TMA 118.1', 'ZURICH 7 TMA 124.7'],
['ZURICH 8 TMA 118.1', 'ZURICH 8 TMA 124.7'],
['ZURICH 9 TMA 118.1', 'ZURICH 9 TMA 124.7'],
['BERN 1 TMA 121.025', 'BERN 1 TMA HX 127.325'],
['BERN 2 TMA 121.025', 'BERN 2 TMA HX 127.325'],
['BERN CTR 121.025', 'BERN CTR HX 121.025'],
['EMMEN 1 CTR 120.425', 'EMMEN 1 CTR HX 120.425'],
['EMMEN 1 TMA 120.425', 'EMMEN 1 TMA HX 134.130'],
['EMMEN 2 CTR 120.425', 'EMMEN 2 CTR HX 120.425'],
['EMMEN 2 TMA 120.425', 'EMMEN 2 TMA HX 134.130'],
['EMMEN 3 TMA 120.425', 'EMMEN 3 TMA HX 134.130'],
['EMMEN 4 TMA 120.425', 'EMMEN 4 TMA HX 134.130'],
['EMMEN 5 TMA 120.425', 'EMMEN 5 TMA HX 134.130'],
['EMMEN 6 TMA 120.425', 'EMMEN 6 TMA HX 134.130'],
]
}
for filename in filenames:
print(filename)
with open(path_in + filename) as f:
data = json.load(f)
if filename in replacements:
targets = [r[0] for r in replacements[filename]]
for feature in data['features']:
if feature['properties']['N'] in targets:
print('replace ' + feature['properties']['N'] + '...')
feature['properties']['N'] = next(x for x in replacements[filename] if x[0] == feature['properties']['N'])[1]
if filename in remove:
features_out = [f for f in data['features'] if int(f['properties']['ID']) not in remove[filename]]
else:
features_out = data['features']
print('removed ' + str(len(data['features']) - len(features_out)) + ' features')
geojson = {
'type': 'FeatureCollection',
'features': features_out
}
print('write ' + filename + '...')
with open(path_out + filename, 'w') as f:
json.dump(geojson, f)
all_features = []
for filename in filenames:
print('read ' + filename + '...')
with open(path_out + filename) as f:
all_features += json.load(f)['features']
print('write airspaces.geojson...')
with open('airspaces.geojson', 'w') as f:
json.dump({
'type': 'FeatureCollection',
'features': all_features
}, f)
print('done') | none | 1 | 2.387058 | 2 |
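
The airspace-surgery script above filters and renames GeoJSON features purely through properties['N'] (the airspace name) and properties['ID'] (a numeric identifier). A minimal feature of that assumed shape — the coordinate values and the name/ID pairing are invented for illustration:

example_feature = {
    "type": "Feature",
    "properties": {"N": "CTR BALE", "ID": 314327},
    "geometry": {
        "type": "Polygon",
        "coordinates": [[[7.52, 47.59], [7.60, 47.59], [7.60, 47.65], [7.52, 47.59]]],
    },
}
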
|
AndroidSpider/spider_main.py | lidenghong1/SmallReptileTraining | 1 | 8305 | from AndroidSpider import url_manager, html_downloader, html_parser, html_output
'''
Crawl Baidu Baike for the Android keyword, collecting related terms and their summaries, and output them as an HTML table page.
Extra module:
BeautifulSoup
'''
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownLoader()
self.parser = html_parser.HtmlParser()
self.out_put = html_output.HtmlOutput()
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print("craw %d : %s" % (count, new_url))
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36"
}
html_content = self.downloader.download(new_url, retry_count=2, headers=headers)
new_urls, new_data = self.parser.parse(new_url, html_content, "utf-8")
self.urls.add_new_urls(new_urls)
self.out_put.collect_data(new_data)
if count >= 30:
break
count = count + 1
except Exception as e:
print("craw failed!\n"+str(e))
self.out_put.output_html()
if __name__ == "__main__":
rootUrl = "http://baike.baidu.com/item/Android"
objSpider = SpiderMain()
objSpider.craw(rootUrl)
| from AndroidSpider import url_manager, html_downloader, html_parser, html_output
'''
Crawl Baidu Baike for the Android keyword, collecting related terms and their summaries, and output them as an HTML table page.
Extra module:
BeautifulSoup
'''
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownLoader()
self.parser = html_parser.HtmlParser()
self.out_put = html_output.HtmlOutput()
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print("craw %d : %s" % (count, new_url))
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36"
}
html_content = self.downloader.download(new_url, retry_count=2, headers=headers)
new_urls, new_data = self.parser.parse(new_url, html_content, "utf-8")
self.urls.add_new_urls(new_urls)
self.out_put.collect_data(new_data)
if count >= 30:
break
count = count + 1
except Exception as e:
print("craw failed!\n"+str(e))
self.out_put.output_html()
if __name__ == "__main__":
rootUrl = "http://baike.baidu.com/item/Android"
objSpider = SpiderMain()
objSpider.craw(rootUrl)
| zh | 0.595744 | Crawl Baidu Baike for the Android keyword, collecting related terms and their summaries, and output them as an HTML table page. Extra module: BeautifulSoup | 2.966678 | 3 |
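
SpiderMain wires together four collaborators that are imported but not shown here (url_manager, html_downloader, html_parser, html_output). Based only on the calls made above, a plausible minimal UrlManager could look like the sketch below; this is an assumed interface, not the project's actual implementation:

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs still waiting to be crawled
        self.old_urls = set()   # URLs that have already been crawled

    def add_new_url(self, url):
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) > 0

    def get_new_url(self):
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url
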
trompace/mutations/__init__.py | trompamusic/ce-queries-template | 1 | 8306 | MUTATION = '''mutation {{
{mutation}
}}'''
def _verify_additional_type(additionaltype):
"""Check that the input to additionaltype is a list of strings.
If it is empty, raise ValueError
If it is a string, convert it to a list of strings."""
if additionaltype is None:
return None
if isinstance(additionaltype, str):
additionaltype = [additionaltype]
if len(additionaltype) == 0:
raise ValueError("additionaltype must be a non-empty list")
return additionaltype
| MUTATION = '''mutation {{
{mutation}
}}'''
def _verify_additional_type(additionaltype):
"""Check that the input to additionaltype is a list of strings.
If it is empty, raise ValueError
If it is a string, convert it to a list of strings."""
if additionaltype is None:
return None
if isinstance(additionaltype, str):
additionaltype = [additionaltype]
if len(additionaltype) == 0:
raise ValueError("additionaltype must be a non-empty list")
return additionaltype
| en | 0.810291 | mutation {{ {mutation} }} Check that the input to additionaltype is a list of strings. If it is empty, raise ValueError If it is a string, convert it to a list of strings. | 2.809383 | 3 |
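
A short usage sketch of the two helpers above, assuming they are imported from this module; the GraphQL body passed to MUTATION.format is a made-up example:

print(_verify_additional_type("DigitalDocument"))   # ['DigitalDocument']
print(_verify_additional_type(["TypeA", "TypeB"]))  # ['TypeA', 'TypeB']
print(_verify_additional_type(None))                # None
# _verify_additional_type([]) raises ValueError("additionaltype must be a non-empty list")

print(MUTATION.format(mutation='CreateThing(name: "example") { identifier }'))
# mutation {
#   CreateThing(name: "example") { identifier }
# }
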
Web_App/infrastructure/infra.py | CapitalOneDevExchangeHackathon/Financial-Fitness | 0 | 8307 | import boto
import boto3
from config import Config
dynamodb = boto3.resource('dynamodb',
aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY,
region_name=Config.REGION)
table = dynamodb.Table('user_details')
tables = boto3.resource('dynamodb', aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY, region_name=Config.REGION).Table('user_details')
print(tables.creation_date_time)
def main():
print("29.7604267")
def insert_into_db(user):
print(user.lastname)
try:
table.put_item(
Item={
'pin': user.pin,
'firstname': user.firstname,
'lastname': user.lastname,
}
)
except Exception as E:
print(E)
return False
return True
if __name__ == "__main__":
main()
| import boto
import boto3
from config import Config
dynamodb = boto3.resource('dynamodb',
aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY,
region_name=Config.REGION)
table = dynamodb.Table('user_details')
tables = boto3.resource('dynamodb', aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY, region_name=Config.REGION).Table('user_details')
print(tables.creation_date_time)
def main():
print("29.7604267")
def insert_into_db(user):
print(user.lastname)
try:
table.put_item(
Item={
'pin': user.pin,
'firstname': user.firstname,
'lastname': user.lastname,
}
)
except Exception as E:
print(E)
return False
return True
if __name__ == "__main__":
main()
| none | 1 | 2.558914 | 3 |
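
insert_into_db only reads the pin, firstname and lastname attributes of its argument, so any simple object exposing those three fields will do; a hypothetical caller (the field values are placeholders):

from collections import namedtuple

User = namedtuple("User", ["pin", "firstname", "lastname"])

if insert_into_db(User(pin="1234", firstname="Jane", lastname="Doe")):
    print("user stored")
else:
    print("insert failed")
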
|
numberTheory/natural.py | ndarwin314/symbolicPy | 0 | 8308 | <reponame>ndarwin314/symbolicPy<filename>numberTheory/natural.py
# TODO: implement algorithms in c++ or something to make them fast
| # TODO: implement algorithms in c++ or something to make them fast | en | 0.827268 | # TODO: implement algorithms in c++ or something to make them fast | 1.102762 | 1 |
SelfTests.py | TeaPackCZ/RobotZed | 0 | 8309 | <filename>SelfTests.py
import os
import unittest
from Logger import Logger
class TestLogger(unittest.TestCase):
def test_file_handling(self):
testLog = Logger("testLog")
## Check if program can create and open file
self.assertTrue(testLog.opened)
returns = testLog.close()
## Check if logger correctly signs bool OPENED and returns
## 0 as succes.
self.assertFalse(testLog.opened)
self.assertEqual(returns,0)
returns = testLog.close()
## Check if logger returns 1 when trying to close already
## closed file
self.assertEqual(returns,1)
## Do cleanup:
os.remove(testLog.name)
def test_logging(self):
testLog = Logger("testLog")
testPhrase = "TestLine\r\n"
testLog.save_line(testPhrase)
testLog.close()
logfile = open(testLog.name)
content = logfile.read()
logfile.close()
saved = content.split(" : ")
## Check if saved data corresponds
self.assertEqual(saved[1],testPhrase)
## cleanup
os.remove(testLog.name)
from gpsNavigation import gpsModule,gpsPoint
class TestGPSNavigation(unittest.TestCase):
def test_gps_angles(self):
gpsMod = gpsModule()
A = gpsPoint(10,10)
B = gpsPoint(10.1,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15623.0)
self.assertEqual(azimut,45.0)
B = gpsPoint(10.0,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,10963.0)
self.assertEqual(azimut,90.0)
B = gpsPoint(9.9,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15625.0)
self.assertEqual(azimut,135.0)
B = gpsPoint(9.9,10.0)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,11132.0)
self.assertEqual(azimut,180.0)
B = gpsPoint(9.9,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15625.0)
self.assertEqual(azimut,225.0)
B = gpsPoint(10.0,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,10963.0)
self.assertEqual(azimut,270.0)
B = gpsPoint(10.1,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15623.0)
self.assertEqual(azimut,315.0)
B = gpsPoint(10.1,10.0)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,11132.0)
self.assertEqual(azimut,0)
if __name__ == '__main__':
unittest.main()
| <filename>SelfTests.py
import os
import unittest
from Logger import Logger
class TestLogger(unittest.TestCase):
def test_file_handling(self):
testLog = Logger("testLog")
## Check if program can create and open file
self.assertTrue(testLog.opened)
returns = testLog.close()
## Check if logger correctly signs bool OPENED and returns
## 0 as succes.
self.assertFalse(testLog.opened)
self.assertEqual(returns,0)
returns = testLog.close()
## Check if logger returns 1 when trying to close already
## closed file
self.assertEqual(returns,1)
## Do cleanup:
os.remove(testLog.name)
def test_logging(self):
testLog = Logger("testLog")
testPhrase = "TestLine\r\n"
testLog.save_line(testPhrase)
testLog.close()
logfile = open(testLog.name)
content = logfile.read()
logfile.close()
saved = content.split(" : ")
## Check if saved data corresponds
self.assertEqual(saved[1],testPhrase)
## cleanup
os.remove(testLog.name)
from gpsNavigation import gpsModule,gpsPoint
class TestGPSNavigation(unittest.TestCase):
def test_gps_angles(self):
gpsMod = gpsModule()
A = gpsPoint(10,10)
B = gpsPoint(10.1,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15623.0)
self.assertEqual(azimut,45.0)
B = gpsPoint(10.0,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,10963.0)
self.assertEqual(azimut,90.0)
B = gpsPoint(9.9,10.1)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15625.0)
self.assertEqual(azimut,135.0)
B = gpsPoint(9.9,10.0)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,11132.0)
self.assertEqual(azimut,180.0)
B = gpsPoint(9.9,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15625.0)
self.assertEqual(azimut,225.0)
B = gpsPoint(10.0,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,10963.0)
self.assertEqual(azimut,270.0)
B = gpsPoint(10.1,9.9)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,15623.0)
self.assertEqual(azimut,315.0)
B = gpsPoint(10.1,10.0)
distance, azimut = gpsMod.GPSData.getDirAndDist(A,B)
self.assertEqual(distance,11132.0)
self.assertEqual(azimut,0)
if __name__ == '__main__':
unittest.main()
| en | 0.627601 | ## Check if program can create and open file ## Check if logger correctly signs bool OPENED and returns ## 0 as succes. ## Check if logger returns 1 when trying to close already ## closed file ## Do cleanup: ## Check if saved data corresponds ## cleanup | 3.248226 | 3 |
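
The GPS test above asserts rounded metre distances and whole-degree azimuths from gpsModule().GPSData.getDirAndDist, whose implementation is not shown. The asserted 11132 m for 0.1 degrees of latitude is consistent with an Earth radius of 6378137 m; a common way to compute such values (haversine distance plus initial bearing) is sketched below as an assumption, not the code under test — its rounding may differ from the asserted figures:

import math

def dir_and_dist(a_lat, a_lon, b_lat, b_lon):
    R = 6378137.0  # equatorial Earth radius in metres (assumed from the test values)
    phi1, phi2 = math.radians(a_lat), math.radians(b_lat)
    dphi = math.radians(b_lat - a_lat)
    dlmb = math.radians(b_lon - a_lon)
    h = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    distance = 2 * R * math.asin(math.sqrt(h))
    y = math.sin(dlmb) * math.cos(phi2)
    x = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(dlmb)
    azimuth = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
    return round(distance), azimuth
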
manga_py/parser.py | Abijithkrishna/manga-py | 0 | 8310 | from logging import warning
from requests import get
from .info import Info
from .provider import Provider
from .providers import get_provider
class Parser:
def __init__(self, args: dict):
self.params = args
def init_provider(
self,
chapter_progress: callable = None,
global_progress: callable = None,
log: callable = None,
quest: callable = None,
info: Info = None,
quest_password: callable = None,
):
original_url = self.params.get('url', '')
provider_url = self.params.get('force_provider', None)
provider = get_provider(provider_url or original_url)
if isinstance(provider, bool):
raise AttributeError('Provider not found')
# update url (if redirect)
self.provider = provider(info) # type: Provider
self.provider.original_url = original_url
real_url = self.check_url(original_url)
if self.provider.allow_auto_change_url():
if real_url != original_url:
warning('Manga url changed! New url: {}'.format(real_url))
self.params['url'] = real_url
self.provider.quiet = self.params.get('quiet', False)
self.provider.set_chapter_progress_callback(chapter_progress)
self.provider.set_global_progress_callback(global_progress)
self.provider.set_log_callback(log)
self.provider.set_quest_callback(quest)
self.provider.set_quest_password_callback(quest_password)
def start(self):
self.provider.process(self.params['url'], self.params)
def check_url(self, url):
proxy = self.params.get('proxy', None)
proxies = {
'http': proxy,
'https': proxy,
} if proxy else None
with get(url, stream=True, proxies=proxies) as response:
_url = response.url
if url != _url:
url = _url
return url
| from logging import warning
from requests import get
from .info import Info
from .provider import Provider
from .providers import get_provider
class Parser:
def __init__(self, args: dict):
self.params = args
def init_provider(
self,
chapter_progress: callable = None,
global_progress: callable = None,
log: callable = None,
quest: callable = None,
info: Info = None,
quest_password: callable = None,
):
original_url = self.params.get('url', '')
provider_url = self.params.get('force_provider', None)
provider = get_provider(provider_url or original_url)
if isinstance(provider, bool):
raise AttributeError('Provider not found')
# update url (if redirect)
self.provider = provider(info) # type: Provider
self.provider.original_url = original_url
real_url = self.check_url(original_url)
if self.provider.allow_auto_change_url():
if real_url != original_url:
warning('Manga url changed! New url: {}'.format(real_url))
self.params['url'] = real_url
self.provider.quiet = self.params.get('quiet', False)
self.provider.set_chapter_progress_callback(chapter_progress)
self.provider.set_global_progress_callback(global_progress)
self.provider.set_log_callback(log)
self.provider.set_quest_callback(quest)
self.provider.set_quest_password_callback(quest_password)
def start(self):
self.provider.process(self.params['url'], self.params)
def check_url(self, url):
proxy = self.params.get('proxy', None)
proxies = {
'http': proxy,
'https': proxy,
} if proxy else None
with get(url, stream=True, proxies=proxies) as response:
_url = response.url
if url != _url:
url = _url
return url
| it | 0.268926 | # update url (if redirect) # type: Provider | 2.346423 | 2 |
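
A hypothetical invocation of Parser, limited to the parameter keys it actually reads above ('url', 'force_provider', 'quiet', 'proxy'); the URL is a placeholder and the optional callbacks are left at their defaults:

args = {
    "url": "https://example.com/manga/some-title",
    "force_provider": None,
    "quiet": True,
    "proxy": None,
}
parser = Parser(args)
parser.init_provider()
parser.start()
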
src/villages/migrations/0008_auto_20161228_2209.py | pwelzel/bornhack-website | 0 | 8311 | <filename>src/villages/migrations/0008_auto_20161228_2209.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-28 22:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('villages', '0007_village_camp'),
]
operations = [
migrations.AlterField(
model_name='village',
name='camp',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='camps.Camp'),
),
]
| <filename>src/villages/migrations/0008_auto_20161228_2209.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-28 22:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('villages', '0007_village_camp'),
]
operations = [
migrations.AlterField(
model_name='village',
name='camp',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='camps.Camp'),
),
]
| en | 0.787238 | # -*- coding: utf-8 -*- # Generated by Django 1.10.4 on 2016-12-28 22:09 | 1.450286 | 1 |
customers/views.py | sindhumadhadi09/CustomerMgmt | 0 | 8312 | <reponame>sindhumadhadi09/CustomerMgmt
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Customer
class IndexView(generic.ListView):
template_name = 'customers/index.html'
context_object_name = 'customers_list'
def get_queryset(self):
return Customer.objects.all()
class CustomerView(generic.TemplateView):
template_name = 'customers/detail.html'
def add_customer(request):
customer = Customer()
customer.customer_firstname = request.POST['fname']
customer.customer_lastname = request.POST['lname']
customer.customer_address = request.POST['address']
customer.customer_city = request.POST['city']
customer.customer_zipcode = request.POST['zip']
customer.customer_state = request.POST['state']
customer.save()
return HttpResponseRedirect(reverse('customers:index'))
def delete_customer(request, customer_id):
p = Customer.objects.get(pk=customer_id)
p.delete()
return HttpResponseRedirect(reverse('customers:index')) | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Customer
class IndexView(generic.ListView):
template_name = 'customers/index.html'
context_object_name = 'customers_list'
def get_queryset(self):
return Customer.objects.all()
class CustomerView(generic.TemplateView):
template_name = 'customers/detail.html'
def add_customer(request):
customer = Customer()
customer.customer_firstname = request.POST['fname']
customer.customer_lastname = request.POST['lname']
customer.customer_address = request.POST['address']
customer.customer_city = request.POST['city']
customer.customer_zipcode = request.POST['zip']
customer.customer_state = request.POST['state']
customer.save()
return HttpResponseRedirect(reverse('customers:index'))
def delete_customer(request, customer_id):
p = Customer.objects.get(pk=customer_id)
p.delete()
return HttpResponseRedirect(reverse('customers:index')) | none | 1 | 2.181892 | 2 |
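
These views resolve 'customers:index' by name, so they presuppose a namespaced URLconf; a minimal urls.py consistent with that (the route strings and the 'add'/'delete' names are assumptions inferred from the view functions):

from django.urls import path

from . import views

app_name = "customers"

urlpatterns = [
    path("", views.IndexView.as_view(), name="index"),
    path("add/", views.add_customer, name="add"),
    path("<int:customer_id>/delete/", views.delete_customer, name="delete"),
]
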
|
salt/ext/tornado/test/import_test.py | yuriks/salt | 1 | 8313 | # flake8: noqa
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from salt.ext.tornado.test.util import unittest
class ImportTest(unittest.TestCase):
def test_import_everything(self):
# Some of our modules are not otherwise tested. Import them
# all (unless they have external dependencies) here to at
# least ensure that there are no syntax errors.
import tornado.auth
import tornado.autoreload
import tornado.concurrent
import tornado.escape
import tornado.gen
import tornado.http1connection
import tornado.httpclient
import tornado.httpserver
import tornado.httputil
import tornado.ioloop
import tornado.iostream
import tornado.locale
import tornado.log
import tornado.netutil
import tornado.options
import tornado.process
import tornado.simple_httpclient
import tornado.stack_context
import tornado.tcpserver
import tornado.tcpclient
import tornado.template
import tornado.testing
import tornado.util
import tornado.web
import tornado.websocket
import tornado.wsgi
# for modules with dependencies, if those dependencies can be loaded,
# load them too.
def test_import_pycurl(self):
try:
import pycurl # type: ignore
except ImportError:
pass
else:
import tornado.curl_httpclient
| # flake8: noqa
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from salt.ext.tornado.test.util import unittest
class ImportTest(unittest.TestCase):
def test_import_everything(self):
# Some of our modules are not otherwise tested. Import them
# all (unless they have external dependencies) here to at
# least ensure that there are no syntax errors.
import tornado.auth
import tornado.autoreload
import tornado.concurrent
import tornado.escape
import tornado.gen
import tornado.http1connection
import tornado.httpclient
import tornado.httpserver
import tornado.httputil
import tornado.ioloop
import tornado.iostream
import tornado.locale
import tornado.log
import tornado.netutil
import tornado.options
import tornado.process
import tornado.simple_httpclient
import tornado.stack_context
import tornado.tcpserver
import tornado.tcpclient
import tornado.template
import tornado.testing
import tornado.util
import tornado.web
import tornado.websocket
import tornado.wsgi
# for modules with dependencies, if those dependencies can be loaded,
# load them too.
def test_import_pycurl(self):
try:
import pycurl # type: ignore
except ImportError:
pass
else:
import tornado.curl_httpclient
| en | 0.858266 | # flake8: noqa # pylint: skip-file # Some of our modules are not otherwise tested. Import them # all (unless they have external dependencies) here to at # least ensure that there are no syntax errors. # for modules with dependencies, if those dependencies can be loaded, # load them too. # type: ignore | 2.1261 | 2 |
butterfree/configs/db/metastore_config.py | fossabot/butterfree | 0 | 8314 | """Holds configurations to read and write with Spark to AWS S3."""
import os
from typing import Any, Dict, List, Optional
from pyspark.sql import DataFrame
from butterfree.configs import environment
from butterfree.configs.db import AbstractWriteConfig
from butterfree.dataframe_service import extract_partition_values
class MetastoreConfig(AbstractWriteConfig):
"""Configuration for Spark metastore database stored.
By default the configuration is for AWS S3.
Attributes:
path: database root location.
        mode: writing mode used by writers.
        format_: expected stored file format.
        file_system: file scheme URI, like: s3a, file.
"""
def __init__(
self,
path: str = None,
mode: str = None,
format_: str = None,
file_system: str = None,
):
self.path = path
self.mode = mode
self.format_ = format_
self.file_system = file_system
@property
def path(self) -> Optional[str]:
"""Bucket name."""
return self.__path
@path.setter
def path(self, value: str) -> None:
self.__path = value or environment.get_variable("FEATURE_STORE_S3_BUCKET")
@property
def format_(self) -> Optional[str]:
"""Expected stored file format."""
return self.__format
@format_.setter
def format_(self, value: str) -> None:
self.__format = value or "parquet"
@property
def mode(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__mode
@mode.setter
def mode(self, value: str) -> None:
self.__mode = value or "overwrite"
@property
def file_system(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__file_system
@file_system.setter
def file_system(self, value: str) -> None:
self.__file_system = value or "s3a"
def get_options(self, key: str) -> Dict[Optional[str], Optional[str]]:
"""Get options for Metastore.
Options will be a dictionary with the write and read configuration for
Spark Metastore.
Args:
key: path to save data into Metastore.
Returns:
Options configuration for Metastore.
"""
return {
"mode": self.mode,
"format_": self.format_,
"path": os.path.join(f"{self.file_system}://{self.path}/", key),
}
def get_path_with_partitions(self, key: str, dataframe: DataFrame) -> List:
"""Get options for AWS S3 from partitioned parquet file.
Options will be a dictionary with the write and read configuration for
Spark to AWS S3.
Args:
key: path to save data into AWS S3 bucket.
dataframe: spark dataframe containing data from a feature set.
Returns:
A list of string for file-system backed data sources.
"""
path_list = []
dataframe_values = extract_partition_values(
dataframe, partition_columns=["year", "month", "day"]
)
for row in dataframe_values:
path_list.append(
f"{self.file_system}://{self.path}/{key}/year={row['year']}/"
f"month={row['month']}/day={row['day']}"
)
return path_list
def translate(self, schema: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Translate feature set spark schema to the corresponding database."""
pass
| """Holds configurations to read and write with Spark to AWS S3."""
import os
from typing import Any, Dict, List, Optional
from pyspark.sql import DataFrame
from butterfree.configs import environment
from butterfree.configs.db import AbstractWriteConfig
from butterfree.dataframe_service import extract_partition_values
class MetastoreConfig(AbstractWriteConfig):
"""Configuration for Spark metastore database stored.
By default the configuration is for AWS S3.
Attributes:
path: database root location.
        mode: writing mode used by writers.
format_: expected stored file format.
file_system: file schema uri, like: s3a, file.
"""
def __init__(
self,
path: str = None,
mode: str = None,
format_: str = None,
file_system: str = None,
):
self.path = path
self.mode = mode
self.format_ = format_
self.file_system = file_system
@property
def path(self) -> Optional[str]:
"""Bucket name."""
return self.__path
@path.setter
def path(self, value: str) -> None:
self.__path = value or environment.get_variable("FEATURE_STORE_S3_BUCKET")
@property
def format_(self) -> Optional[str]:
"""Expected stored file format."""
return self.__format
@format_.setter
def format_(self, value: str) -> None:
self.__format = value or "parquet"
@property
def mode(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__mode
@mode.setter
def mode(self, value: str) -> None:
self.__mode = value or "overwrite"
@property
def file_system(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__file_system
@file_system.setter
def file_system(self, value: str) -> None:
self.__file_system = value or "s3a"
def get_options(self, key: str) -> Dict[Optional[str], Optional[str]]:
"""Get options for Metastore.
Options will be a dictionary with the write and read configuration for
Spark Metastore.
Args:
key: path to save data into Metastore.
Returns:
Options configuration for Metastore.
"""
return {
"mode": self.mode,
"format_": self.format_,
"path": os.path.join(f"{self.file_system}://{self.path}/", key),
}
def get_path_with_partitions(self, key: str, dataframe: DataFrame) -> List:
"""Get options for AWS S3 from partitioned parquet file.
Options will be a dictionary with the write and read configuration for
Spark to AWS S3.
Args:
key: path to save data into AWS S3 bucket.
dataframe: spark dataframe containing data from a feature set.
Returns:
A list of string for file-system backed data sources.
"""
path_list = []
dataframe_values = extract_partition_values(
dataframe, partition_columns=["year", "month", "day"]
)
for row in dataframe_values:
path_list.append(
f"{self.file_system}://{self.path}/{key}/year={row['year']}/"
f"month={row['month']}/day={row['day']}"
)
return path_list
def translate(self, schema: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Translate feature set spark schema to the corresponding database."""
pass
| en | 0.683056 | Holds configurations to read and write with Spark to AWS S3. Configuration for Spark metastore database stored. By default the configuration is for AWS S3. Attributes: path: database root location. mode: writing mode used be writers. format_: expected stored file format. file_system: file schema uri, like: s3a, file. Bucket name. Expected stored file format. Writing mode used be writers. Writing mode used be writers. Get options for Metastore. Options will be a dictionary with the write and read configuration for Spark Metastore. Args: key: path to save data into Metastore. Returns: Options configuration for Metastore. Get options for AWS S3 from partitioned parquet file. Options will be a dictionary with the write and read configuration for Spark to AWS S3. Args: key: path to save data into AWS S3 bucket. dataframe: spark dataframe containing data from a feature set. Returns: A list of string for file-system backed data sources. Translate feature set spark schema to the corresponding database. | 2.69569 | 3 |
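# Editor's note: a minimal usage sketch for the MetastoreConfig row above. The bucket
# name and feature-set key are hypothetical placeholders; the import path follows the
# file path shown in the row, and the printed defaults come from the class itself.
from butterfree.configs.db.metastore_config import MetastoreConfig

config = MetastoreConfig(path="my-feature-store-bucket")      # hypothetical bucket
options = config.get_options(key="feature_sets/user_orders")  # hypothetical key
print(options["mode"])     # "overwrite" by default
print(options["format_"])  # "parquet" by default
print(options["path"])     # "s3a://my-feature-store-bucket/feature_sets/user_orders"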
examples/2-objects.py | johanngan/special_relativity | 4 | 8315 | #!/usr/bin/env python3
import sys
sys.path.append('..')
import specrel.geom as geom
import specrel.spacetime.physical as phy
import specrel.visualize as vis
# Shared parameters
include_grid = True
include_legend = True
tlim = (0, 2)
xlim = (-2, 2)
# A stationary point object
stationary = phy.MovingObject(0, draw_options={'label': '$v = 0$'})
## Alternate:
# direction = (1, 0)
# point = (0, 0)
# stationary = geom.Line(direction, point, draw_options={'label': '$v = 0$'})
title='Stationary object'
p = vis.stplot(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
p.save('2-objects_stationary_point.png')
p.show()
# A stationary point object, animated
anim = vis.stanimate(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
anim.save('2-objects_stationary_point_anim.mp4')
anim.show()
# A stationary point object, animated with worldline
anim = vis.stanimate_with_worldline(stationary, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper right')
anim.save('2-objects_stationary_point_anim_worldline.mp4')
anim.show()
# A bunch of moving point objects, animated
moving = phy.MovingObject(0, velocity=1/2,
draw_options={'color': 'red', 'label': '$v = c/2$'})
light = phy.MovingObject(0, velocity=1,
draw_options={'color': 'gold', 'label': '$v = c$'})
ftl = phy.MovingObject(0, velocity=3/2,
draw_options={'color': 'cyan', 'label': '$v = 3c/2$'})
objects = geom.Collection([stationary, moving, light, ftl])
title = 'Various objects'
anim = vis.stanimate_with_worldline(objects, title=title,
current_time_color='magenta', tlim=tlim, xlim=xlim, grid=include_grid,
legend=include_legend, legend_loc='upper left')
anim.save('2-objects_moving_points.mp4')
anim.show()
# A moving meterstick
meterstick = phy.MovingObject(-1/2, length=1, velocity=1/2,
draw_options={'label': 'Meterstick'})
# # Alternate:
# direction = (1, 1/2)
# left = geom.Line(direction, (0, -1/2))
# right = geom.Line(direction, (0, 1/2))
# meterstick = geom.Ribbon(left, right, draw_options={'label': 'Meterstick'})
title = 'Moving meterstick ($v = c/2$)'
anim = vis.stanimate_with_worldline(meterstick, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper left')
anim.save('2-objects_moving_meterstick.mp4')
anim.show()
| #!/usr/bin/env python3
import sys
sys.path.append('..')
import specrel.geom as geom
import specrel.spacetime.physical as phy
import specrel.visualize as vis
# Shared parameters
include_grid = True
include_legend = True
tlim = (0, 2)
xlim = (-2, 2)
# A stationary point object
stationary = phy.MovingObject(0, draw_options={'label': '$v = 0$'})
## Alternate:
# direction = (1, 0)
# point = (0, 0)
# stationary = geom.Line(direction, point, draw_options={'label': '$v = 0$'})
title='Stationary object'
p = vis.stplot(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
p.save('2-objects_stationary_point.png')
p.show()
# A stationary point object, animated
anim = vis.stanimate(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
anim.save('2-objects_stationary_point_anim.mp4')
anim.show()
# A stationary point object, animated with worldline
anim = vis.stanimate_with_worldline(stationary, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper right')
anim.save('2-objects_stationary_point_anim_worldline.mp4')
anim.show()
# A bunch of moving point objects, animated
moving = phy.MovingObject(0, velocity=1/2,
draw_options={'color': 'red', 'label': '$v = c/2$'})
light = phy.MovingObject(0, velocity=1,
draw_options={'color': 'gold', 'label': '$v = c$'})
ftl = phy.MovingObject(0, velocity=3/2,
draw_options={'color': 'cyan', 'label': '$v = 3c/2$'})
objects = geom.Collection([stationary, moving, light, ftl])
title = 'Various objects'
anim = vis.stanimate_with_worldline(objects, title=title,
current_time_color='magenta', tlim=tlim, xlim=xlim, grid=include_grid,
legend=include_legend, legend_loc='upper left')
anim.save('2-objects_moving_points.mp4')
anim.show()
# A moving meterstick
meterstick = phy.MovingObject(-1/2, length=1, velocity=1/2,
draw_options={'label': 'Meterstick'})
# # Alternate:
# direction = (1, 1/2)
# left = geom.Line(direction, (0, -1/2))
# right = geom.Line(direction, (0, 1/2))
# meterstick = geom.Ribbon(left, right, draw_options={'label': 'Meterstick'})
title = 'Moving meterstick ($v = c/2$)'
anim = vis.stanimate_with_worldline(meterstick, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper left')
anim.save('2-objects_moving_meterstick.mp4')
anim.show()
| en | 0.673224 | #!/usr/bin/env python3 # Shared parameters # A stationary point object ## Alternate: # direction = (1, 0) # point = (0, 0) # stationary = geom.Line(direction, point, draw_options={'label': '$v = 0$'}) # A stationary point object, animated # A stationary point object, animated with worldline # A bunch of moving point objects, animated # A moving meterstick # # Alternate: # direction = (1, 1/2) # left = geom.Line(direction, (0, -1/2)) # right = geom.Line(direction, (0, 1/2)) # meterstick = geom.Ribbon(left, right, draw_options={'label': 'Meterstick'}) | 2.207962 | 2 |
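# Editor's note: a small numeric aside (not from the original repo) making explicit what
# the meterstick animation above shows: with c = 1, an object moving at v = 1/2 is
# length-contracted by the Lorentz factor gamma = 1 / sqrt(1 - v**2).
from math import sqrt

v = 0.5                          # meterstick speed from the demo, as a fraction of c
gamma = 1 / sqrt(1 - v**2)       # Lorentz factor, roughly 1.155
contracted_length = 1.0 / gamma  # observed length of the 1-unit meterstick, roughly 0.866
print(gamma, contracted_length)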
firmware/modulator.py | mfkiwl/OpenXcvr | 14 | 8316 | from baremetal import *
from math import pi, sin, cos
import sys
from scale import scale
from settings import *
from ssb import ssb_polar
def modulator(clk, audio, audio_stb, settings):
audio_bits = audio.subtype.bits
#AM modulation
am_mag = Unsigned(12).constant(0) + audio + 2048
am_phase = Signed(32).constant(0)
am_stb = audio_stb
#FM modulation
fm_mag = Unsigned(12).constant(4095)
frequency = Signed(32).constant(0) + audio
nfm_scaled_frequency = frequency * (2**(32-audio_bits) * 5 / 50)
nfm_phase = nfm_scaled_frequency.subtype.register(clk, en=audio_stb, init=0)
nfm_phase.d(nfm_phase + nfm_scaled_frequency)
scaled_frequency = frequency * (2**(32-audio_bits) * 8 / 50)
fm_phase = scaled_frequency.subtype.register(clk, en=audio_stb, init=0)
fm_phase.d(fm_phase + scaled_frequency)
fm_stb = Boolean().register(clk, d=audio_stb, init=0)
#ssb
ssb_mag, ssb_phase, ssb_stb = ssb_polar(clk, audio, audio_stb, settings.mode==LSB)
ssb_mag <<= 1
ssb_phase = Signed(32).constant(0) + ssb_phase
ssb_phase <<= (32 - audio_bits)
#cw modulation
cw_mag = Unsigned(12).constant(0)
cw_phase = Signed(32).constant(0)
cw_stb = audio_stb
#mode switching
magnitude = Unsigned(12).select(settings.mode, am_mag, fm_mag, fm_mag, ssb_mag, ssb_mag, cw_mag)
phase = Signed(32).select(settings.mode, am_phase, nfm_phase, fm_phase, ssb_phase, ssb_phase, cw_phase)
stb = Boolean().select(settings.mode, am_stb, fm_stb, fm_stb, ssb_stb, ssb_stb, cw_stb)
return magnitude, phase, audio_stb
import numpy as np
from matplotlib import pyplot as plt
def test_modulator(stimulus, mode):
settings = Settings()
settings.mode = Unsigned(3).input("filter_mode")
clk = Clock("clk")
audio_in = Signed(12).input("i_data_in")
audio_stb_in = Boolean().input("stb_in")
i, q, stb = modulator(clk, audio_in, audio_stb_in, settings)
#simulate
clk.initialise()
settings.mode.set(mode)
response = []
for data in stimulus:
for j in range(200):
audio_stb_in.set(j==199)
audio_in.set(data)
clk.tick()
if stb.get():
                print(i.get(), q.get())
if i.get() is None or q.get() is None:
continue
response.append(i.get()*(2**20)+1j*q.get())
response = np.array(response)
plt.title("Modulator")
plt.xlabel("Time (samples)")
plt.ylabel("Value")
a, = plt.plot(np.real(response), label="I")
b, = plt.plot(np.imag(response), label="Q")
c, = plt.plot(stimulus*(2**20), label="Audio Input")
plt.legend(handles=[a, b, c])
plt.show()
if __name__ == "__main__" and "sim" in sys.argv:
#mode am stim am
stimulus=(
np.sin(np.arange(1000)*2.0*pi*0.02)*1023+
np.sin(np.arange(1000)*2.0*pi*0.03)*1023
)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, NBFM)
test_modulator(stimulus, USB)
| from baremetal import *
from math import pi, sin, cos
import sys
from scale import scale
from settings import *
from ssb import ssb_polar
def modulator(clk, audio, audio_stb, settings):
audio_bits = audio.subtype.bits
#AM modulation
am_mag = Unsigned(12).constant(0) + audio + 2048
am_phase = Signed(32).constant(0)
am_stb = audio_stb
#FM modulation
fm_mag = Unsigned(12).constant(4095)
frequency = Signed(32).constant(0) + audio
nfm_scaled_frequency = frequency * (2**(32-audio_bits) * 5 / 50)
nfm_phase = nfm_scaled_frequency.subtype.register(clk, en=audio_stb, init=0)
nfm_phase.d(nfm_phase + nfm_scaled_frequency)
scaled_frequency = frequency * (2**(32-audio_bits) * 8 / 50)
fm_phase = scaled_frequency.subtype.register(clk, en=audio_stb, init=0)
fm_phase.d(fm_phase + scaled_frequency)
fm_stb = Boolean().register(clk, d=audio_stb, init=0)
#ssb
ssb_mag, ssb_phase, ssb_stb = ssb_polar(clk, audio, audio_stb, settings.mode==LSB)
ssb_mag <<= 1
ssb_phase = Signed(32).constant(0) + ssb_phase
ssb_phase <<= (32 - audio_bits)
#cw modulation
cw_mag = Unsigned(12).constant(0)
cw_phase = Signed(32).constant(0)
cw_stb = audio_stb
#mode switching
magnitude = Unsigned(12).select(settings.mode, am_mag, fm_mag, fm_mag, ssb_mag, ssb_mag, cw_mag)
phase = Signed(32).select(settings.mode, am_phase, nfm_phase, fm_phase, ssb_phase, ssb_phase, cw_phase)
stb = Boolean().select(settings.mode, am_stb, fm_stb, fm_stb, ssb_stb, ssb_stb, cw_stb)
return magnitude, phase, audio_stb
import numpy as np
from matplotlib import pyplot as plt
def test_modulator(stimulus, mode):
settings = Settings()
settings.mode = Unsigned(3).input("filter_mode")
clk = Clock("clk")
audio_in = Signed(12).input("i_data_in")
audio_stb_in = Boolean().input("stb_in")
i, q, stb = modulator(clk, audio_in, audio_stb_in, settings)
#simulate
clk.initialise()
settings.mode.set(mode)
response = []
for data in stimulus:
for j in range(200):
audio_stb_in.set(j==199)
audio_in.set(data)
clk.tick()
if stb.get():
                print(i.get(), q.get())
if i.get() is None or q.get() is None:
continue
response.append(i.get()*(2**20)+1j*q.get())
response = np.array(response)
plt.title("Modulator")
plt.xlabel("Time (samples)")
plt.ylabel("Value")
a, = plt.plot(np.real(response), label="I")
b, = plt.plot(np.imag(response), label="Q")
c, = plt.plot(stimulus*(2**20), label="Audio Input")
plt.legend(handles=[a, b, c])
plt.show()
if __name__ == "__main__" and "sim" in sys.argv:
#mode am stim am
stimulus=(
np.sin(np.arange(1000)*2.0*pi*0.02)*1023+
np.sin(np.arange(1000)*2.0*pi*0.03)*1023
)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, FM)
#test_modulator(stimulus, NBFM)
test_modulator(stimulus, USB)
| en | 0.241182 | #AM modulation #FM modulation #ssb #cw modulation #mode switching #simulate #mode am stim am #test_modulator(stimulus, FM) #test_modulator(stimulus, FM) #test_modulator(stimulus, NBFM) | 2.235083 | 2 |
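# Editor's note: a floating-point sketch, not part of the original firmware, of the
# phase-accumulator FM scheme used above: each audio sample becomes a phase increment
# (here scaled by the same 8/50 factor used for fm_phase), the phase register integrates
# it, and the constant-envelope I/Q outputs are the cosine and sine of that phase.
import numpy as np

audio = np.sin(np.arange(1000) * 2.0 * np.pi * 0.02)  # toy audio, full scale = 1.0
deviation = 8.0 / 50.0                                # mirrors the 8/50 scaling above
phase = 2.0 * np.pi * np.cumsum(audio * deviation)    # accumulated phase in radians
i_samples = np.cos(phase)                             # constant-envelope I
q_samples = np.sin(phase)                             # constant-envelope Q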
tests/sentry/auth/test_helper.py | pierredup/sentry | 0 | 8317 | from __future__ import absolute_import
from six.moves.urllib.parse import urlencode
from django.test import RequestFactory
from django.contrib.auth.models import AnonymousUser
from sentry.auth.helper import handle_new_user
from sentry.models import AuthProvider, InviteStatus, OrganizationMember
from sentry.testutils import TestCase
from sentry.utils.compat import mock
class HandleNewUserTest(TestCase):
@mock.patch("sentry.analytics.record")
def test_simple(self, mock_record):
provider = "dummy"
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
auth_provider = AuthProvider.objects.create(
organization=self.organization, provider=provider
)
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
auth_identity = handle_new_user(auth_provider, self.organization, request, identity)
user = auth_identity.user
assert user.email == identity["email"]
assert OrganizationMember.objects.filter(organization=self.organization, user=user).exists()
signup_record = [r for r in mock_record.call_args_list if r[0][0] == "user.signup"]
assert signup_record == [
mock.call(
"user.signup", user_id=user.id, source="sso", provider=provider, referrer="in-app"
)
]
def test_associated_existing_member_invite_by_email(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
member = OrganizationMember.objects.create(
organization=self.organization, email=identity["email"]
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
def test_associated_existing_member_invite_request(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
member = self.create_member(
organization=self.organization,
email=identity["email"],
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assert OrganizationMember.objects.filter(
organization=self.organization,
user=auth_identity.user,
invite_status=InviteStatus.APPROVED.value,
).exists()
assert not OrganizationMember.objects.filter(id=member.id).exists()
def test_associate_pending_invite(self):
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
# The org member invite should have a non matching email, but the
# member id and token will match from the cookie, allowing association
member = OrganizationMember.objects.create(
organization=self.organization, email="<EMAIL>", token="abc"
)
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
request.COOKIES["pending-invite"] = urlencode(
{"memberId": member.id, "token": member.token, "url": ""}
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
| from __future__ import absolute_import
from six.moves.urllib.parse import urlencode
from django.test import RequestFactory
from django.contrib.auth.models import AnonymousUser
from sentry.auth.helper import handle_new_user
from sentry.models import AuthProvider, InviteStatus, OrganizationMember
from sentry.testutils import TestCase
from sentry.utils.compat import mock
class HandleNewUserTest(TestCase):
@mock.patch("sentry.analytics.record")
def test_simple(self, mock_record):
provider = "dummy"
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
auth_provider = AuthProvider.objects.create(
organization=self.organization, provider=provider
)
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
auth_identity = handle_new_user(auth_provider, self.organization, request, identity)
user = auth_identity.user
assert user.email == identity["email"]
assert OrganizationMember.objects.filter(organization=self.organization, user=user).exists()
signup_record = [r for r in mock_record.call_args_list if r[0][0] == "user.signup"]
assert signup_record == [
mock.call(
"user.signup", user_id=user.id, source="sso", provider=provider, referrer="in-app"
)
]
def test_associated_existing_member_invite_by_email(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
member = OrganizationMember.objects.create(
organization=self.organization, email=identity["email"]
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
def test_associated_existing_member_invite_request(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
member = self.create_member(
organization=self.organization,
email=identity["email"],
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assert OrganizationMember.objects.filter(
organization=self.organization,
user=auth_identity.user,
invite_status=InviteStatus.APPROVED.value,
).exists()
assert not OrganizationMember.objects.filter(id=member.id).exists()
def test_associate_pending_invite(self):
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
# The org member invite should have a non matching email, but the
# member id and token will match from the cookie, allowing association
member = OrganizationMember.objects.create(
organization=self.organization, email="<EMAIL>", token="abc"
)
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
request.COOKIES["pending-invite"] = urlencode(
{"memberId": member.id, "token": member.token, "url": ""}
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
| en | 0.962702 | # The org member invite should have a non matching email, but the # member id and token will match from the cookie, allowing association | 2.003538 | 2 |
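# Editor's note: a stdlib-only sketch (urllib.parse is the Python 3 equivalent of the
# six.moves import used above) of the "pending-invite" cookie round trip the last test
# relies on; the member id and token values here are placeholders.
from urllib.parse import urlencode, parse_qs

cookie = urlencode({"memberId": 42, "token": "abc", "url": ""})
print(cookie)                                         # memberId=42&token=abc&url=
decoded = parse_qs(cookie, keep_blank_values=True)
print(decoded["memberId"][0], decoded["token"][0])    # 42 abc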
groundstation/broadcast_events/__init__.py | richo/groundstation | 26 | 8318 | <reponame>richo/groundstation
from broadcast_ping import BroadcastPing
EVENT_TYPES = {
"PING": BroadcastPing,
}
class UnknownBroadcastEvent(Exception):
pass
def new_broadcast_event(data):
event_type, payload = data.split(" ", 1)
if event_type not in EVENT_TYPES:
raise UnknownBroadcastEvent(event_type)
return EVENT_TYPES[event_type](payload)
| from broadcast_ping import BroadcastPing
EVENT_TYPES = {
"PING": BroadcastPing,
}
class UnknownBroadcastEvent(Exception):
pass
def new_broadcast_event(data):
event_type, payload = data.split(" ", 1)
if event_type not in EVENT_TYPES:
raise UnknownBroadcastEvent(event_type)
    return EVENT_TYPES[event_type](payload) | none | 1 |  | 2.740414 | 3 |
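# Editor's note: a short usage sketch for the row above; the import path follows the
# package location groundstation/broadcast_events/__init__.py, and "PING host-1234" is a
# made-up example of the "<TYPE> <payload>" wire format split on the first space.
from groundstation.broadcast_events import UnknownBroadcastEvent, new_broadcast_event

event = new_broadcast_event("PING host-1234")   # dispatches to BroadcastPing("host-1234")
try:
    new_broadcast_event("BOGUS data")
except UnknownBroadcastEvent as exc:
    print("unsupported broadcast event type:", exc)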
mbta_python/__init__.py | dougzor/mbta_python | 0 | 8319 | import datetime
import requests
from mbta_python.models import Stop, Direction, Schedule, Mode, \
TripSchedule, Alert, StopWithMode, Prediction
HOST = "http://realtime.mbta.com/developer/api/v2"
def datetime_to_epoch(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds())
class MBTASDK(object):
"""Wrapper around calls to the MBTA Realtime API
"""
def __init__(self, api_key):
self.api_key = api_key
def _make_request(self, path, params):
url = "{}/{}".format(HOST, path)
response = requests.get(url, params=params)
data = response.json()
error = data.get("error")
if error:
raise Exception(error["message"])
return response.json()
def get_stops_by_location(self, latitude, longitude):
"""Get a List of Stops sorted by proximity to the given
latitude and longitude
"""
params = {
"lat": latitude,
"lon": longitude,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("stopsbylocation", params)
stops = [Stop(stop_data) for stop_data in data["stop"]]
return stops
def get_stops_by_route(self, route_id):
"""Return a List of Directions for the route_id
that contain a list of Stops that Direction and Route serve
"""
params = {
"route": route_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("stopsbyroute", params)
return [Direction(d) for d in data["direction"]]
def get_routes_by_stop(self, stop_id):
"""Return a list of routes that serve a particular stop
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("routesbystop", params)
return StopWithMode(data)
def get_schedules_by_stop(self, stop_id, route_id=None, direction_id=None,
date=None, max_time=None, max_trips=None):
"""Return scheduled arrivals and departures for a direction and route for a
particular stop.
stop_id - Stop ID
route_id - Route ID, If not included then schedule for all routes
serving the stop will be returned,
direction_id - Direction ID, If included then route must also be
included if not included then schedule for all
directions of the route serving the stop will be
returned
date - Time after which schedule should be returned. If included
then must be within the next seven (7) days
If not included then schedule starting from the current
datetime will be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1 and
100. If not included defaults to 5.
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json",
"route": route_id,
"direction": direction_id,
"datetime": datetime_to_epoch(date) if date else None,
"max_time": max_time,
"max_trips": max_trips
}
data = self._make_request("schedulebystop", params)
return Schedule(data)
def get_schedules_by_routes(self, route_ids, date=None,
max_time=None, max_trips=None):
"""Return the scheduled arrivals and departures in a direction
for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
date - Time after which schedule should be returned. If included
then must be within the next seven (7) days If not included
then schedule starting from the current datetime will
be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1
and 100. If not included defaults to 5.
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"datetime": datetime_to_epoch(date) if date else None,
"max_time": max_time,
"max_trips": max_trips
}
data = self._make_request("schedulebyroutes", params)
return [Mode(m) for m in data["mode"]]
def get_schedules_by_trip(self, trip_id, date=None):
"""Return the scheduled arrivals and departures in a direction
for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
date - Time after which schedule should be returned. If included then
must be within the next seven (7) days. If not included then
schedule starting from the current datetime will be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1 and
100. If not included defaults to 5.
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json",
"datetime": datetime_to_epoch(date) if date else None,
}
data = self._make_request("schedulebytrip", params)
return TripSchedule(data)
def get_predictions_by_stop(self, stop_id, include_access_alerts=False,
include_service_alerts=True):
"""Return predicted arrivals and departures in the next hour for a
direction and route for a particular stop.
stop_id - Stop ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("predictionsbystop", params)
return Prediction(data)
def get_predictions_by_routes(self, route_ids, include_access_alerts=False,
include_service_alerts=True):
"""Return predictions for upcoming trips (including trips already underway)
in a direction for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("predictionsbyroutes", params)
return Prediction(data)
def get_vehicles_by_routes(self, route_ids, include_access_alerts=False,
include_service_alerts=True):
"""Return vehicle positions for upcoming trips (including trips already
underway) in a direction for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("vehiclesbyroutes", params)
return [Mode(m) for m in data]
def get_predictions_by_trip(self, trip_id):
"""Return the predicted arrivals and departures for a particular trip.
trip_id - TripID
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("predictionsbytrip", params)
return TripSchedule(data)
def get_vehicles_by_trip(self, trip_id):
"""Return the predicted vehicle positions for a particular trip.
trip_id - TripID
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("vehiclesbytrip", params)
return TripSchedule(data)
| import datetime
import requests
from mbta_python.models import Stop, Direction, Schedule, Mode, \
TripSchedule, Alert, StopWithMode, Prediction
HOST = "http://realtime.mbta.com/developer/api/v2"
def datetime_to_epoch(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds())
class MBTASDK(object):
"""Wrapper around calls to the MBTA Realtime API
"""
def __init__(self, api_key):
self.api_key = api_key
def _make_request(self, path, params):
url = "{}/{}".format(HOST, path)
response = requests.get(url, params=params)
data = response.json()
error = data.get("error")
if error:
raise Exception(error["message"])
return response.json()
def get_stops_by_location(self, latitude, longitude):
"""Get a List of Stops sorted by proximity to the given
latitude and longitude
"""
params = {
"lat": latitude,
"lon": longitude,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("stopsbylocation", params)
stops = [Stop(stop_data) for stop_data in data["stop"]]
return stops
def get_stops_by_route(self, route_id):
"""Return a List of Directions for the route_id
that contain a list of Stops that Direction and Route serve
"""
params = {
"route": route_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("stopsbyroute", params)
return [Direction(d) for d in data["direction"]]
def get_routes_by_stop(self, stop_id):
"""Return a list of routes that serve a particular stop
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("routesbystop", params)
return StopWithMode(data)
def get_schedules_by_stop(self, stop_id, route_id=None, direction_id=None,
date=None, max_time=None, max_trips=None):
"""Return scheduled arrivals and departures for a direction and route for a
particular stop.
stop_id - Stop ID
route_id - Route ID, If not included then schedule for all routes
serving the stop will be returned,
direction_id - Direction ID, If included then route must also be
included if not included then schedule for all
directions of the route serving the stop will be
returned
date - Time after which schedule should be returned. If included
then must be within the next seven (7) days
If not included then schedule starting from the current
datetime will be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1 and
100. If not included defaults to 5.
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json",
"route": route_id,
"direction": direction_id,
"datetime": datetime_to_epoch(date) if date else None,
"max_time": max_time,
"max_trips": max_trips
}
data = self._make_request("schedulebystop", params)
return Schedule(data)
def get_schedules_by_routes(self, route_ids, date=None,
max_time=None, max_trips=None):
"""Return the scheduled arrivals and departures in a direction
for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
date - Time after which schedule should be returned. If included
then must be within the next seven (7) days If not included
then schedule starting from the current datetime will
be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1
and 100. If not included defaults to 5.
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"datetime": datetime_to_epoch(date) if date else None,
"max_time": max_time,
"max_trips": max_trips
}
data = self._make_request("schedulebyroutes", params)
return [Mode(m) for m in data["mode"]]
def get_schedules_by_trip(self, trip_id, date=None):
"""Return the scheduled arrivals and departures in a direction
for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
date - Time after which schedule should be returned. If included then
must be within the next seven (7) days. If not included then
schedule starting from the current datetime will be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1 and
100. If not included defaults to 5.
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json",
"datetime": datetime_to_epoch(date) if date else None,
}
data = self._make_request("schedulebytrip", params)
return TripSchedule(data)
def get_predictions_by_stop(self, stop_id, include_access_alerts=False,
include_service_alerts=True):
"""Return predicted arrivals and departures in the next hour for a
direction and route for a particular stop.
stop_id - Stop ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("predictionsbystop", params)
return Prediction(data)
def get_predictions_by_routes(self, route_ids, include_access_alerts=False,
include_service_alerts=True):
"""Return predictions for upcoming trips (including trips already underway)
in a direction for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("predictionsbyroutes", params)
return Prediction(data)
def get_vehicles_by_routes(self, route_ids, include_access_alerts=False,
include_service_alerts=True):
"""Return vehicle positions for upcoming trips (including trips already
underway) in a direction for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("vehiclesbyroutes", params)
return [Mode(m) for m in data]
def get_predictions_by_trip(self, trip_id):
"""Return the predicted arrivals and departures for a particular trip.
trip_id - TripID
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("predictionsbytrip", params)
return TripSchedule(data)
def get_vehicles_by_trip(self, trip_id):
"""Return the predicted vehicle positions for a particular trip.
trip_id - TripID
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("vehiclesbytrip", params)
return TripSchedule(data)
| en | 0.880313 | Wrapper around calls to the MBTA Realtime API Get a List of Stops sorted by proximity to the given latitude and longitude Return a List of Directions for the route_id that contain a list of Stops that Direction and Route serve Return a list of routes that serve a particular stop Return scheduled arrivals and departures for a direction and route for a particular stop. stop_id - Stop ID route_id - Route ID, If not included then schedule for all routes serving the stop will be returned, direction_id - Direction ID, If included then route must also be included if not included then schedule for all directions of the route serving the stop will be returned date - Time after which schedule should be returned. If included then must be within the next seven (7) days If not included then schedule starting from the current datetime will be returned max_time - Defines maximum range of time (in minutes) within which trips will be returned. If not included defaults to 60. max_trips - Defines number of trips to return. Integer between 1 and 100. If not included defaults to 5. Return the scheduled arrivals and departures in a direction for a particular route or routes. route_ids - List of Route IDs, or single Route ID date - Time after which schedule should be returned. If included then must be within the next seven (7) days If not included then schedule starting from the current datetime will be returned max_time - Defines maximum range of time (in minutes) within which trips will be returned. If not included defaults to 60. max_trips - Defines number of trips to return. Integer between 1 and 100. If not included defaults to 5. Return the scheduled arrivals and departures in a direction for a particular route or routes. route_ids - List of Route IDs, or single Route ID date - Time after which schedule should be returned. If included then must be within the next seven (7) days. If not included then schedule starting from the current datetime will be returned max_time - Defines maximum range of time (in minutes) within which trips will be returned. If not included defaults to 60. max_trips - Defines number of trips to return. Integer between 1 and 100. If not included defaults to 5. Return predicted arrivals and departures in the next hour for a direction and route for a particular stop. stop_id - Stop ID include_access_alerts - Whether or not alerts pertaining to accessibility (elevators, escalators) should be returned include_service_alerts - Whether or not service alerts should be returned Return predictions for upcoming trips (including trips already underway) in a direction for a particular route or routes. route_ids - List of Route IDs, or single Route ID include_access_alerts - Whether or not alerts pertaining to accessibility (elevators, escalators) should be returned include_service_alerts - Whether or not service alerts should be returned Return vehicle positions for upcoming trips (including trips already underway) in a direction for a particular route or routes. route_ids - List of Route IDs, or single Route ID include_access_alerts - Whether or not alerts pertaining to accessibility (elevators, escalators) should be returned include_service_alerts - Whether or not service alerts should be returned Return the predicted arrivals and departures for a particular trip. trip_id - TripID Return the predicted vehicle positions for a particular trip. trip_id - TripID | 2.84457 | 3 |
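# Editor's note: a hypothetical usage sketch for the MBTASDK wrapper above; the API key,
# coordinates and stop id are placeholders, and live access to the realtime.mbta.com v2
# endpoint is assumed.
from mbta_python import MBTASDK

sdk = MBTASDK(api_key="YOUR_API_KEY")                      # placeholder key
stops = sdk.get_stops_by_location(42.3601, -71.0589)       # roughly downtown Boston
prediction = sdk.get_predictions_by_stop("place-sstat")    # placeholder stop id
print(prediction)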
density_model_torch_custom.py | piotrwinkler/breast_density_classifier | 0 | 8320 | <reponame>piotrwinkler/breast_density_classifier<gh_stars>0
import argparse
import glob
import os
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import models_torch as models
import utils
EXPERIMENT_DATA_DIR = "/tmp/mgr"
def inference(parameters, verbose=True) -> int:
# resolve device
device = torch.device(
"cuda:{}".format(parameters["gpu_number"]) if parameters["device_type"] == "gpu"
else "cpu"
)
# load input images
datum_l_cc = utils.load_images(parameters['image_path'], 'L-CC')
datum_r_cc = utils.load_images(parameters['image_path'], 'R-CC')
datum_l_mlo = utils.load_images(parameters['image_path'], 'L-MLO')
datum_r_mlo = utils.load_images(parameters['image_path'], 'R-MLO')
# construct models and prepare data
if parameters["model_type"] == 'cnn':
model = models.BaselineBreastModel(device, nodropout_probability=1.0, gaussian_noise_std=0.0).to(device)
model.load_state_dict(torch.load(parameters["model_path"]))
x = {
"L-CC": torch.Tensor(datum_l_cc).permute(0, 3, 1, 2).to(device),
"L-MLO": torch.Tensor(datum_l_mlo).permute(0, 3, 1, 2).to(device),
"R-CC": torch.Tensor(datum_r_cc).permute(0, 3, 1, 2).to(device),
"R-MLO": torch.Tensor(datum_r_mlo).permute(0, 3, 1, 2).to(device),
}
elif parameters["model_type"] == 'histogram':
model = models.BaselineHistogramModel(num_bins=parameters["bins_histogram"]).to(device)
model.load_state_dict(torch.load(parameters["model_path"]))
x = torch.Tensor(utils.histogram_features_generator([
datum_l_cc, datum_r_cc, datum_l_mlo, datum_r_mlo
], parameters)).to(device)
else:
raise RuntimeError(parameters["model_type"])
# run prediction
with torch.no_grad():
prediction_density = model(x).cpu().numpy()
if verbose:
# nicely prints out the predictions
print('Density prediction:\n'
'\tAlmost entirely fatty (0):\t\t\t' + str(prediction_density[0, 0]) + '\n'
'\tScattered areas of fibroglandular density (1):\t' + str(prediction_density[0, 1]) + '\n'
'\tHeterogeneously dense (2):\t\t\t' + str(prediction_density[0, 2]) + '\n'
'\tExtremely dense (3):\t\t\t\t' + str(prediction_density[0, 3]) + '\n')
return np.argmax(prediction_density[0])+1 # return density in scope 1 to 4
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run Inference')
parser.add_argument('model_type')
parser.add_argument('--bins-histogram', default=50)
parser.add_argument('--model-path', default=None)
parser.add_argument('--device-type', default="cpu")
# parser.add_argument('--image-path', default="images/")
args = parser.parse_args()
parameters_ = {
"model_type": args.model_type,
"bins_histogram": args.bins_histogram,
"model_path": args.model_path,
"device_type": args.device_type,
# "image_path": args.image_path,
}
if parameters_["model_path"] is None:
if args.model_type == "histogram":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineHistogramModel/model.p"
if args.model_type == "cnn":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineBreastModel/model.p"
predicted_values = []
real_values = []
predicted_values_two_classes = []
real_values_two_classes = []
two_classes_mapping = {1: 0, 2: 0, 3: 1, 4: 1}
for dir in glob.glob(f"{EXPERIMENT_DATA_DIR}/*/"):
parameters_["image_path"] = dir
predicted_density = inference(parameters_)
with open(os.path.join(dir, "density.txt")) as file:
real_density = int(file.read())
print(f"Predicted density: {predicted_density}")
print(f"Real density: {real_density}\n")
print(f"Predicted density (2 cls): {two_classes_mapping[predicted_density]}")
print(f"Real density (2 cls): {two_classes_mapping[real_density]}\n")
predicted_values.append(predicted_density)
real_values.append(real_density)
predicted_values_two_classes.append(two_classes_mapping[predicted_density])
real_values_two_classes.append(two_classes_mapping[real_density])
print(f"Total accuracy: {accuracy_score(real_values, predicted_values)}")
print(f"Total accuracy two classes: {accuracy_score(real_values_two_classes, predicted_values_two_classes)}")
"""
python density_model_torch_custom.py histogram
python density_model_torch_custom.py cnn
"""
| import argparse
import glob
import os
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import models_torch as models
import utils
EXPERIMENT_DATA_DIR = "/tmp/mgr"
def inference(parameters, verbose=True) -> int:
# resolve device
device = torch.device(
"cuda:{}".format(parameters["gpu_number"]) if parameters["device_type"] == "gpu"
else "cpu"
)
# load input images
datum_l_cc = utils.load_images(parameters['image_path'], 'L-CC')
datum_r_cc = utils.load_images(parameters['image_path'], 'R-CC')
datum_l_mlo = utils.load_images(parameters['image_path'], 'L-MLO')
datum_r_mlo = utils.load_images(parameters['image_path'], 'R-MLO')
# construct models and prepare data
if parameters["model_type"] == 'cnn':
model = models.BaselineBreastModel(device, nodropout_probability=1.0, gaussian_noise_std=0.0).to(device)
model.load_state_dict(torch.load(parameters["model_path"]))
x = {
"L-CC": torch.Tensor(datum_l_cc).permute(0, 3, 1, 2).to(device),
"L-MLO": torch.Tensor(datum_l_mlo).permute(0, 3, 1, 2).to(device),
"R-CC": torch.Tensor(datum_r_cc).permute(0, 3, 1, 2).to(device),
"R-MLO": torch.Tensor(datum_r_mlo).permute(0, 3, 1, 2).to(device),
}
elif parameters["model_type"] == 'histogram':
model = models.BaselineHistogramModel(num_bins=parameters["bins_histogram"]).to(device)
model.load_state_dict(torch.load(parameters["model_path"]))
x = torch.Tensor(utils.histogram_features_generator([
datum_l_cc, datum_r_cc, datum_l_mlo, datum_r_mlo
], parameters)).to(device)
else:
raise RuntimeError(parameters["model_type"])
# run prediction
with torch.no_grad():
prediction_density = model(x).cpu().numpy()
if verbose:
# nicely prints out the predictions
print('Density prediction:\n'
'\tAlmost entirely fatty (0):\t\t\t' + str(prediction_density[0, 0]) + '\n'
'\tScattered areas of fibroglandular density (1):\t' + str(prediction_density[0, 1]) + '\n'
'\tHeterogeneously dense (2):\t\t\t' + str(prediction_density[0, 2]) + '\n'
'\tExtremely dense (3):\t\t\t\t' + str(prediction_density[0, 3]) + '\n')
return np.argmax(prediction_density[0])+1 # return density in scope 1 to 4
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run Inference')
parser.add_argument('model_type')
parser.add_argument('--bins-histogram', default=50)
parser.add_argument('--model-path', default=None)
parser.add_argument('--device-type', default="cpu")
# parser.add_argument('--image-path', default="images/")
args = parser.parse_args()
parameters_ = {
"model_type": args.model_type,
"bins_histogram": args.bins_histogram,
"model_path": args.model_path,
"device_type": args.device_type,
# "image_path": args.image_path,
}
if parameters_["model_path"] is None:
if args.model_type == "histogram":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineHistogramModel/model.p"
if args.model_type == "cnn":
parameters_["model_path"] = "saved_models/BreastDensity_BaselineBreastModel/model.p"
predicted_values = []
real_values = []
predicted_values_two_classes = []
real_values_two_classes = []
two_classes_mapping = {1: 0, 2: 0, 3: 1, 4: 1}
for dir in glob.glob(f"{EXPERIMENT_DATA_DIR}/*/"):
parameters_["image_path"] = dir
predicted_density = inference(parameters_)
with open(os.path.join(dir, "density.txt")) as file:
real_density = int(file.read())
print(f"Predicted density: {predicted_density}")
print(f"Real density: {real_density}\n")
print(f"Predicted density (2 cls): {two_classes_mapping[predicted_density]}")
print(f"Real density (2 cls): {two_classes_mapping[real_density]}\n")
predicted_values.append(predicted_density)
real_values.append(real_density)
predicted_values_two_classes.append(two_classes_mapping[predicted_density])
real_values_two_classes.append(two_classes_mapping[real_density])
print(f"Total accuracy: {accuracy_score(real_values, predicted_values)}")
print(f"Total accuracy two classes: {accuracy_score(real_values_two_classes, predicted_values_two_classes)}")
"""
python density_model_torch_custom.py histogram
python density_model_torch_custom.py cnn
""" | en | 0.419594 | # resolve device # load input images # construct models and prepare data # run prediction # nicely prints out the predictions # return density in scope 1 to 4 # parser.add_argument('--image-path', default="images/") # "image_path": args.image_path, python density_model_torch_custom.py histogram python density_model_torch_custom.py cnn | 2.169504 | 2 |
esmvaltool/diag_scripts/ensclus/ens_anom.py | yifatdzigan/ESMValTool | 148 | 8321 | """Computation of ensemble anomalies based on a desired value."""
import os
import numpy as np
from scipy import stats
# User-defined packages
from read_netcdf import read_iris, save_n_2d_fields
from sel_season_area import sel_area, sel_season
def ens_anom(filenames, dir_output, name_outputs, varname, numens, season,
area, extreme):
"""Ensemble anomalies.
Computation of the ensemble anomalies based on the desired value
from the input variable (it can be the percentile, mean, maximum, standard
deviation or trend)
OUTPUT: NetCDF files of ensemble mean of climatology, selected value and
anomaly maps.
"""
print('The name of the output files will be <variable>_{0}.txt'
.format(name_outputs))
print('Number of ensemble members: {0}'.format(numens))
outfiles = []
# Reading the netCDF file of 3Dfield, for all the ensemble members
var_ens = []
for ens in range(numens):
ifile = filenames[ens]
# print('ENSEMBLE MEMBER %s' %ens)
var, varunits, lat, lon, dates, _ = read_iris(ifile)
        # Conversion from kg m-2 s-1 to mm/day
if varunits == 'kg m-2 s-1':
var = var * 86400 # there are 86400 seconds in a day
varunits = 'mm/day'
# Selecting a season (DJF,DJFM,NDJFM,JJA)
var_season, _ = sel_season(var, dates, season)
# Selecting only [latS-latN, lonW-lonE] box region
var_area, lat_area, lon_area = sel_area(lat, lon, var_season, area)
var_ens.append(var_area)
if varunits == 'kg m-2 s-1':
print('\nPrecipitation rate units were converted from kg m-2 s-1 '
'to mm/day')
print('The variable is {0} ({1})'.format(varname, varunits))
print('Original var shape: (time x lat x lon)={0}'.format(var.shape))
print('var shape after selecting season {0} and area {1}: '
'(time x lat x lon)={2}'.format(season, area, var_area.shape))
if extreme == 'mean':
# Compute the time mean over the entire period, for each ens member
varextreme_ens = [np.nanmean(var_ens[i], axis=0)
for i in range(numens)]
elif len(extreme.split("_")) == 2:
# Compute the chosen percentile over the period, for each ens member
quant = int(extreme.partition("th")[0])
varextreme_ens = [np.nanpercentile(var_ens[i], quant, axis=0)
for i in range(numens)]
elif extreme == 'maximum':
# Compute the maximum value over the period, for each ensemble member
varextreme_ens = [np.nanmax(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'std':
# Compute the standard deviation over the period, for each ens member
varextreme_ens = [np.nanstd(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'trend':
# Compute the linear trend over the period, for each ensemble member
trendmap = np.empty((var_ens[0].shape[1], var_ens[0].shape[2]))
trendmap_ens = []
for i in range(numens):
for jla in range(var_ens[0].shape[1]):
for jlo in range(var_ens[0].shape[2]):
slope, _, _, _, _ = \
stats.linregress(range(var_ens[0].shape[0]),
var_ens[i][:, jla, jlo])
trendmap[jla, jlo] = slope
trendmap_ens.append(trendmap.copy())
varextreme_ens = trendmap_ens
varextreme_ens_np = np.array(varextreme_ens)
print('Anomalies are computed with respect to the {0}'.format(extreme))
# Compute and save the anomalies with respect to the ensemble
ens_anomalies = varextreme_ens_np - np.nanmean(varextreme_ens_np, axis=0)
varsave = 'ens_anomalies'
ofile = os.path.join(dir_output, 'ens_anomalies_{0}.nc'
.format(name_outputs))
# print(ofile)
print('ens_anomalies shape: (numens x lat x lon)={0}'
.format(ens_anomalies.shape))
save_n_2d_fields(lat_area, lon_area, ens_anomalies, varsave,
varunits, ofile)
outfiles.append(ofile)
# Compute and save the climatology
vartimemean_ens = [np.mean(var_ens[i], axis=0) for i in range(numens)]
ens_climatologies = np.array(vartimemean_ens)
varsave = 'ens_climatologies'
ofile = os.path.join(dir_output, 'ens_climatologies_{0}.nc'
.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_climatologies, varsave,
varunits, ofile)
outfiles.append(ofile)
ens_extreme = varextreme_ens_np
varsave = 'ens_extreme'
ofile = os.path.join(dir_output, 'ens_extreme_{0}.nc'.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_extreme, varsave,
varunits, ofile)
outfiles.append(ofile)
return outfiles
| """Computation of ensemble anomalies based on a desired value."""
import os
import numpy as np
from scipy import stats
# User-defined packages
from read_netcdf import read_iris, save_n_2d_fields
from sel_season_area import sel_area, sel_season
def ens_anom(filenames, dir_output, name_outputs, varname, numens, season,
area, extreme):
"""Ensemble anomalies.
Computation of the ensemble anomalies based on the desired value
from the input variable (it can be the percentile, mean, maximum, standard
deviation or trend)
OUTPUT: NetCDF files of ensemble mean of climatology, selected value and
anomaly maps.
"""
print('The name of the output files will be <variable>_{0}.txt'
.format(name_outputs))
print('Number of ensemble members: {0}'.format(numens))
outfiles = []
# Reading the netCDF file of 3Dfield, for all the ensemble members
var_ens = []
for ens in range(numens):
ifile = filenames[ens]
# print('ENSEMBLE MEMBER %s' %ens)
var, varunits, lat, lon, dates, _ = read_iris(ifile)
        # Conversion from kg m-2 s-1 to mm/day
if varunits == 'kg m-2 s-1':
var = var * 86400 # there are 86400 seconds in a day
varunits = 'mm/day'
# Selecting a season (DJF,DJFM,NDJFM,JJA)
var_season, _ = sel_season(var, dates, season)
# Selecting only [latS-latN, lonW-lonE] box region
var_area, lat_area, lon_area = sel_area(lat, lon, var_season, area)
var_ens.append(var_area)
if varunits == 'kg m-2 s-1':
print('\nPrecipitation rate units were converted from kg m-2 s-1 '
'to mm/day')
print('The variable is {0} ({1})'.format(varname, varunits))
print('Original var shape: (time x lat x lon)={0}'.format(var.shape))
print('var shape after selecting season {0} and area {1}: '
'(time x lat x lon)={2}'.format(season, area, var_area.shape))
if extreme == 'mean':
# Compute the time mean over the entire period, for each ens member
varextreme_ens = [np.nanmean(var_ens[i], axis=0)
for i in range(numens)]
elif len(extreme.split("_")) == 2:
# Compute the chosen percentile over the period, for each ens member
quant = int(extreme.partition("th")[0])
varextreme_ens = [np.nanpercentile(var_ens[i], quant, axis=0)
for i in range(numens)]
elif extreme == 'maximum':
# Compute the maximum value over the period, for each ensemble member
varextreme_ens = [np.nanmax(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'std':
# Compute the standard deviation over the period, for each ens member
varextreme_ens = [np.nanstd(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'trend':
# Compute the linear trend over the period, for each ensemble member
trendmap = np.empty((var_ens[0].shape[1], var_ens[0].shape[2]))
trendmap_ens = []
for i in range(numens):
for jla in range(var_ens[0].shape[1]):
for jlo in range(var_ens[0].shape[2]):
slope, _, _, _, _ = \
stats.linregress(range(var_ens[0].shape[0]),
var_ens[i][:, jla, jlo])
trendmap[jla, jlo] = slope
trendmap_ens.append(trendmap.copy())
varextreme_ens = trendmap_ens
varextreme_ens_np = np.array(varextreme_ens)
print('Anomalies are computed with respect to the {0}'.format(extreme))
# Compute and save the anomalies with respect to the ensemble
ens_anomalies = varextreme_ens_np - np.nanmean(varextreme_ens_np, axis=0)
varsave = 'ens_anomalies'
ofile = os.path.join(dir_output, 'ens_anomalies_{0}.nc'
.format(name_outputs))
# print(ofile)
print('ens_anomalies shape: (numens x lat x lon)={0}'
.format(ens_anomalies.shape))
save_n_2d_fields(lat_area, lon_area, ens_anomalies, varsave,
varunits, ofile)
outfiles.append(ofile)
# Compute and save the climatology
vartimemean_ens = [np.mean(var_ens[i], axis=0) for i in range(numens)]
ens_climatologies = np.array(vartimemean_ens)
varsave = 'ens_climatologies'
ofile = os.path.join(dir_output, 'ens_climatologies_{0}.nc'
.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_climatologies, varsave,
varunits, ofile)
outfiles.append(ofile)
ens_extreme = varextreme_ens_np
varsave = 'ens_extreme'
ofile = os.path.join(dir_output, 'ens_extreme_{0}.nc'.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_extreme, varsave,
varunits, ofile)
outfiles.append(ofile)
return outfiles
| en | 0.845845 | Computation of ensemble anomalies based on a desired value. # User-defined packages Ensemble anomalies. Computation of the ensemble anomalies based on the desired value from the input variable (it can be the percentile, mean, maximum, standard deviation or trend) OUTPUT: NetCDF files of ensemble mean of climatology, selected value and anomaly maps. # Reading the netCDF file of 3Dfield, for all the ensemble members # print('ENSEMBLE MEMBER %s' %ens) # Convertion from kg m-2 s-1 to mm/day # there are 86400 seconds in a day # Selecting a season (DJF,DJFM,NDJFM,JJA) # Selecting only [latS-latN, lonW-lonE] box region # Compute the time mean over the entire period, for each ens member # Compute the chosen percentile over the period, for each ens member # Compute the maximum value over the period, for each ensemble member # Compute the standard deviation over the period, for each ens member # Compute the linear trend over the period, for each ensemble member # Compute and save the anomalies with respect to the ensemble # print(ofile) # Compute and save the climatology | 2.96774 | 3 |
pytition/petition/models.py | Te-k/Pytition | 0 | 8322 | <filename>pytition/petition/models.py
from django.db import models
from django.utils.html import mark_safe, strip_tags
from django.utils.text import slugify
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.exceptions import ValidationError
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.hashers import get_hasher
from django.db import transaction
from django.urls import reverse
from django.db.models import Q
from tinymce import models as tinymce_models
from colorfield.fields import ColorField
import html
class Petition(models.Model):
NO = "no gradient"
RIGHT = "to right"
BOTTOM = "to bottom"
BOTTOM_RIGHT = "to bottom right"
BOTTOM_LEFT = "to bottom left"
LINEAR_GRADIENT_CHOICES = (
(NO, "no gradient"),
(RIGHT, "to right"),
(BOTTOM, "to bottom"),
(BOTTOM_RIGHT, "to bottom right"),
(BOTTOM_LEFT, "to bottom left")
)
MAIL = "MAIL"
POST = "POST"
GET = "GET"
NEWSLETTER_SUBSCRIBE_METHOD_CHOICES = (
(MAIL, "MAIL"),
(POST, "POST"),
(GET, "GET")
)
title = models.TextField(verbose_name=ugettext_lazy("Title"))
text = tinymce_models.HTMLField(blank=True)
side_text = tinymce_models.HTMLField(blank=True)
target = models.IntegerField(default=500)
linear_gradient_direction = models.CharField(choices=LINEAR_GRADIENT_CHOICES, max_length=15, default=NO, blank=True)
gradient_from = ColorField(blank=True)
gradient_to = ColorField(blank=True)
bgcolor = ColorField(blank=True)
footer_text = tinymce_models.HTMLField(blank=True)
footer_links = tinymce_models.HTMLField(blank=True)
twitter_description = models.CharField(max_length=200, blank=True)
twitter_image = models.CharField(max_length=500, blank=True)
has_newsletter = models.BooleanField(default=False)
newsletter_subscribe_http_data = models.TextField(blank=True)
newsletter_subscribe_http_mailfield = models.CharField(max_length=100, blank=True)
newsletter_subscribe_http_url = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_subject = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_from = models.CharField(max_length=500, blank=True)
newsletter_subscribe_mail_to = models.CharField(max_length=500, blank=True)
newsletter_subscribe_method = models.CharField(choices=NEWSLETTER_SUBSCRIBE_METHOD_CHOICES, max_length=4,
default=MAIL)
newsletter_subscribe_mail_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
newsletter_subscribe_mail_smtp_port = models.IntegerField(default=25, blank=True)
newsletter_subscribe_mail_smtp_user = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_password = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_tls = models.BooleanField(default=False)
newsletter_subscribe_mail_smtp_starttls = models.BooleanField(default=False)
org_twitter_handle = models.CharField(max_length=20, blank=True)
published = models.BooleanField(default=False)
newsletter_text = models.CharField(max_length=1000, blank=True)
sign_form_footer = models.TextField(blank=True)
confirmation_email_sender = models.CharField(max_length=100, blank=True)
confirmation_email_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
confirmation_email_smtp_port = models.IntegerField(default=25, blank=True)
confirmation_email_smtp_user = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_password = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_tls = models.BooleanField(default=False)
confirmation_email_smtp_starttls = models.BooleanField(default=False)
use_custom_email_settings = models.BooleanField(default=False)
salt = models.TextField(blank=True)
slugs = models.ManyToManyField('SlugModel', blank=True, through='SlugOwnership')
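    # Copy every non-empty field value from the given PetitionTemplate onto this Petition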
def prepopulate_from_template(self, template):
for field in self._meta.fields:
if hasattr(self, field.name) and hasattr(template, field.name):
template_value = getattr(template, field.name)
if template_value is not None and template_value != "":
setattr(self, field.name, template_value)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if not self.salt:
hasher = get_hasher()
self.salt = hasher.salt().decode('utf-8')
super().save()
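    # Build an initial slug from the title, refusing it if another petition of the same owner already uses it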
def slugify(self):
if self.slugs.count() == 0:
slugtext = slugify(self.raw_title)
# let's search for slug collisions
filters = {'slugs__slug': slugtext}
if self.organization_set.count() > 0:
org = self.organization_set.first()
filters.update({'organization__name': org.name})
else:
user = self.pytitionuser_set.first()
filters.update({'pytitionuser__user__username': user.user.username})
results = Petition.objects.filter(**filters)
if results.count() > 0:
raise ValueError(_("This slug is already used by another petition from this organization/user"))
slug = SlugModel(slug=slugify(slugtext))
slug.save()
self.slugs.add(slug)
self.save()
@classmethod
def by_id(cls, id):
try:
return Petition.objects.get(pk=id)
except Petition.DoesNotExist:
return None
def get_signature_number(self, confirmed=None):
signatures = self.signature_set
if confirmed is not None:
signatures = signatures.filter(confirmed=confirmed)
return signatures.count()
def already_signed(self, email):
signature_number = Signature.objects.filter(petition = self.id)\
.filter(confirmed = True).filter(email = email).count()
return signature_number > 0
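    # Look up the signature matching this confirmation hash, mark it confirmed and persist it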
def confirm_signature(self, conf_hash):
signature = Signature.objects.filter(petition=self.id).get(confirmation_hash=conf_hash)
if signature:
# Now confirm the signature corresponding to this hash
signature.confirm()
signature.save()
return _("Thank you for confirming your signature!")
else:
return None
def add_slug(self, slugtext):
with transaction.atomic():
slugtext = slugify(slugtext)
slug = SlugModel.objects.create(slug=slugtext)
if self.owner_type == "org":
SlugOwnership.objects.create(slug=slug, petition=self, organization=self.owner)
elif self.owner_type == "user":
SlugOwnership.objects.create(slug=slug, petition=self, user=self.owner)
else:
raise ValueError(_("This petition has no owner, cannot add slug!"))
def del_slug(self, slug):
slug.delete()
def publish(self):
self.published = True
self.save()
def unpublish(self):
self.published = False
self.save()
@property
def owner_type(self):
if self.organization_set.count() > 0:
return "org"
elif self.pytitionuser_set.count() > 0:
return "user"
else:
return "no_owner"
@property
def owner(self):
if self.organization_set.count() > 0:
return self.organization_set.first()
elif self.pytitionuser_set.count() > 0:
return self.pytitionuser_set.first()
else:
return None
@property
def signature_number(self):
return self.get_signature_number(True)
@property
def raw_twitter_description(self):
return html.unescape(mark_safe(strip_tags(self.twitter_description)))
@property
def raw_text(self):
return html.unescape(mark_safe(strip_tags(self.text)))
@property
def raw_title(self):
return html.unescape(mark_safe(strip_tags(self.title).strip()))
def __str__(self):
return self.raw_title
def __repr__(self):
return self.raw_title
@property
def url(self):
slugs = self.slugs.all()
if len(slugs) == 0:
# If there is no slug, ugly url
return reverse('detail', kwargs={'petition_id': self.id})
else:
if self.organization_set.count() > 0:
# This petition is owned by an Organization
org = self.organization_set.first()
return reverse("slug_show_petition",
kwargs={"orgslugname": org.slugname,
"petitionname": slugs[0]})
elif self.pytitionuser_set.count() > 0:
# This petition is owned by a PytitionUser
user = self.pytitionuser_set.first()
return reverse("slug_show_petition",
kwargs={"username": user.user.username,
"petitionname": slugs[0]})
else:
# This is a BUG!
raise ValueError(_("This petition is buggy. Sorry about that!"))
class SlugOwnership(models.Model):
petition = models.ForeignKey(Petition, on_delete=models.CASCADE)
slug = models.ForeignKey('SlugModel', on_delete=models.CASCADE)
user = models.ForeignKey('PytitionUser', on_delete=models.CASCADE, blank=True, null=True, default=None)
organization = models.ForeignKey('Organization', on_delete=models.CASCADE, blank=True, null=True, default=None)
class Meta:
constraints = [
models.UniqueConstraint(fields=['slug', 'organization'], name="unique_slugnameperorg", condition=Q(user=None)),
models.UniqueConstraint(fields=['slug', 'user'], name="unique_slugnameperuser",
condition=Q(organization=None)),
]
class Signature(models.Model):
first_name = models.CharField(max_length=50, verbose_name=ugettext_lazy("First name"))
last_name = models.CharField(max_length=50, verbose_name=ugettext_lazy("Last name"))
phone = models.CharField(max_length=20, blank=True, verbose_name=ugettext_lazy("Phone number"))
email = models.EmailField(verbose_name=ugettext_lazy("Email address"))
confirmation_hash = models.CharField(max_length=128)
confirmed = models.BooleanField(default=False, verbose_name=ugettext_lazy("Confirmed"))
petition = models.ForeignKey(Petition, on_delete=models.CASCADE, verbose_name=ugettext_lazy("Petition"))
subscribed_to_mailinglist = models.BooleanField(default=False, verbose_name=ugettext_lazy("Subscribed to mailing list"))
date = models.DateTimeField(blank=True, auto_now_add=True, verbose_name=ugettext_lazy("Date"))
ipaddress = models.TextField(blank=True, null=True)
def clean(self):
if self.petition.already_signed(self.email):
if self.petition.signature_set.filter(email = self.email).get(confirmed = True).id != self.id:
raise ValidationError(_("You already signed the petition"))
def save(self, *args, **kwargs):
self.clean()
if self.confirmed:
# invalidating other signatures from same email
Signature.objects.filter(petition=self.petition).filter(email=self.email)\
.exclude(id=self.id).delete()
super().save(*args, **kwargs)
def confirm(self):
self.confirmed = True
def __str__(self):
return html.unescape("[{}:{}] {} {}".format(self.petition.id, "OK" if self.confirmed else "..", self.first_name,
self.last_name))
def __repr__(self):
return html.unescape("[{}:{}] {} {}".format(self.petition.id, "OK" if self.confirmed else "..", self.first_name,
self.last_name))
class PetitionTemplate(models.Model):
NO = "no gradient"
RIGHT = "to right"
BOTTOM = "to bottom"
BOTTOM_RIGHT = "to bottom right"
BOTTOM_LEFT = "to bottom left"
LINEAR_GRADIENT_CHOICES = (
(NO, "no gradient"),
(RIGHT, "to right"),
(BOTTOM, "to bottom"),
(BOTTOM_RIGHT, "to bottom right"),
(BOTTOM_LEFT, "to bottom left")
)
MAIL = "MAIL"
POST = "POST"
GET = "GET"
NEWSLETTER_SUBSCRIBE_METHOD_CHOICES = (
(MAIL, "MAIL"),
(POST, "POST"),
(GET, "GET")
)
name = models.CharField(max_length=50, verbose_name=ugettext_lazy("Name"), db_index=True)
text = tinymce_models.HTMLField(blank=True)
side_text = tinymce_models.HTMLField(blank=True)
target = models.IntegerField(blank=True, null=True)
linear_gradient_direction = models.CharField(choices=LINEAR_GRADIENT_CHOICES, max_length=15, default=NO, blank=True)
gradient_from = ColorField(blank=True)
gradient_to = ColorField(blank=True)
bgcolor = ColorField(blank=True)
footer_text = tinymce_models.HTMLField(blank=True)
footer_links = tinymce_models.HTMLField(blank=True)
twitter_description = models.CharField(max_length=200, blank=True)
twitter_image = models.CharField(max_length=500, blank=True)
has_newsletter = models.BooleanField(default=False)
newsletter_subscribe_http_data = models.TextField(blank=True)
newsletter_subscribe_http_mailfield = models.CharField(max_length=100, blank=True)
newsletter_subscribe_http_url = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_subject = models.CharField(max_length=1000, blank=True)
newsletter_subscribe_mail_from = models.EmailField(max_length=500, blank=True)
newsletter_subscribe_mail_to = models.EmailField(max_length=500, blank=True)
newsletter_subscribe_method = models.CharField(choices=NEWSLETTER_SUBSCRIBE_METHOD_CHOICES, max_length=4,
default=MAIL)
newsletter_subscribe_mail_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
newsletter_subscribe_mail_smtp_port = models.IntegerField(default=25)
newsletter_subscribe_mail_smtp_user = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_password = models.CharField(max_length=200, blank=True)
newsletter_subscribe_mail_smtp_tls = models.BooleanField(default=False)
newsletter_subscribe_mail_smtp_starttls = models.BooleanField(default=False)
org_twitter_handle = models.CharField(max_length=20, blank=True)
newsletter_text = models.CharField(max_length=1000, blank=True)
sign_form_footer = models.TextField(blank=True)
confirmation_email_sender = models.EmailField(max_length=100, blank=True)
confirmation_email_smtp_host = models.CharField(max_length=100, default='localhost', blank=True)
confirmation_email_smtp_port = models.IntegerField(default=25, blank=True)
confirmation_email_smtp_user = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_password = models.CharField(max_length=200, blank=True)
confirmation_email_smtp_tls = models.BooleanField(default=False)
confirmation_email_smtp_starttls = models.BooleanField(default=False)
use_custom_email_settings = models.BooleanField(default=False)
def __str__(self):
return self.name
def __repr__(self):
return self.name
class Meta:
index_together = ["id", ]
class SlugModel(models.Model):
slug = models.SlugField(max_length=200)
class Meta:
constraints = [
models.UniqueConstraint(fields=['slug'], name='unique_slugname')
]
def __str__(self):
return self.slug
def __repr__(self):
return self.slug
class Organization(models.Model):
name = models.CharField(max_length=200, verbose_name=ugettext_lazy("Name"), unique=True)
petition_templates = models.ManyToManyField(PetitionTemplate, through='TemplateOwnership',
through_fields=['organization', 'template'], blank=True,
verbose_name=ugettext_lazy("Petition templates"))
petitions = models.ManyToManyField(Petition, blank=True, verbose_name=ugettext_lazy("Petitions"))
default_template = models.ForeignKey(PetitionTemplate, blank=True, null=True, related_name='+',
verbose_name=ugettext_lazy("Default petition template"), to_field='id',
on_delete=models.SET_NULL)
slugname = models.SlugField(max_length=200, unique=True)
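    # Delete the organization together with all of its petitions and templates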
def drop(self):
with transaction.atomic():
petitions = list(self.petitions.all())
templates = list(self.petition_templates.all())
self.delete()
for petition in petitions:
petition.delete()
for template in templates:
template.delete()
def add_member(self, member):
member.organizations.add(self)
permission = Permission.objects.create(organization=self)
permission.save()
member.permissions.add(permission)
member.save()
def __str__(self):
return self.name
def __repr__(self):
return self.name
    def save(self, *args, **kwargs):
        if not self.slugname:
            self.slugname = slugify(self.name)
        super(Organization, self).save(*args, **kwargs)
    @property
    def kind(self):
        return "org"
    @property
    def fullname(self):
        return self.name
class Permission(models.Model):
organization = models.ForeignKey(Organization, on_delete=models.CASCADE,
verbose_name=ugettext_lazy("Organization related to these permissions"))
can_add_members = models.BooleanField(default=False)
can_remove_members = models.BooleanField(default=False)
can_create_petitions = models.BooleanField(default=False)
can_modify_petitions = models.BooleanField(default=False)
can_delete_petitions = models.BooleanField(default=False)
can_create_templates = models.BooleanField(default=False)
can_modify_templates = models.BooleanField(default=False)
can_delete_templates = models.BooleanField(default=False)
can_view_signatures = models.BooleanField(default=False)
can_modify_signatures = models.BooleanField(default=False)
can_delete_signatures = models.BooleanField(default=False)
can_modify_permissions = models.BooleanField(default=False)
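    # Convenience helper to grant or revoke every permission flag at once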
def set_all(self, value):
self.can_add_members = value
self.can_remove_members = value
self.can_create_petitions = value
self.can_modify_petitions = value
self.can_delete_petitions = value
self.can_create_templates = value
self.can_modify_templates = value
self.can_delete_templates = value
self.can_view_signatures = value
self.can_modify_signatures = value
self.can_delete_signatures = value
self.can_modify_permissions = value
self.save()
def __str__(self):
ret = "{orgname} : ".format(orgname=self.organization.name)
if self.user.count() > 0:
ret = ret + "{username}".format(username=self.user.all()[0].name)
else:
ret = ret + "None"
return ret
def __repr__(self):
return self.__str__()
class PytitionUser(models.Model):
petitions = models.ManyToManyField(Petition, blank=True)
organizations = models.ManyToManyField(Organization, related_name="members", blank=True)
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="pytitionuser")
permissions = models.ManyToManyField(Permission, related_name="user", blank=True)
invitations = models.ManyToManyField(Organization, related_name="invited", blank=True)
petition_templates = models.ManyToManyField(PetitionTemplate, blank=True, through='TemplateOwnership',
through_fields=['user', 'template'],
verbose_name=ugettext_lazy("Petition templates"))
default_template = models.ForeignKey(PetitionTemplate, blank=True, null=True, related_name='+',
verbose_name=ugettext_lazy("Default petition template"), to_field='id',
on_delete=models.SET_NULL)
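    # Check a single permission flag: petitions owned directly by the user always pass,
    # otherwise the flag is read from the Permission object tied to the organization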
def has_right(self, right, petition=None, org=None):
if petition:
if petition in self.petitions.all():
return True
try:
if not org:
org = Organization.objects.get(petitions=petition, members=self)
permissions = self.permissions.get(organization=org)
return getattr(permissions, right)
except:
return False
if org:
try:
permissions = self.permissions.get(organization=org)
return getattr(permissions, right)
except:
return False
return False
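    # Delete the user along with their petitions and templates, dropping organizations left without members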
def drop(self):
with transaction.atomic():
orgs = list(self.organizations.all())
petitions = list(self.petitions.all())
templates = list(self.petition_templates.all())
self.delete()
for org in orgs:
if org.members.count() == 0:
org.drop()
for petition in petitions:
petition.delete()
for template in templates:
template.delete()
@property
def is_authenticated(self):
return self.user.is_authenticated
@property
def name(self):
return self.username
@property
def username(self):
return self.user.username
@property
def get_full_name(self):
return self.user.get_full_name()
@property
def fullname(self):
return self.get_full_name
@property
def kind(self):
return "user"
def __str__(self):
return self.get_full_name
def __repr__(self):
return self.get_full_name
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_user_profile(sender, instance, created, **kwargs):
if created:
PytitionUser.objects.create(user=instance)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def save_user_profile(sender, instance, **kwargs):
instance.pytitionuser.save()
@receiver(post_save, sender=Organization)
def ensure_organization_slugname(sender, instance, **kwargs):
if not instance.slugname:
slugtext = slugify(instance.name)
instance.slugname = slugtext
instance.save()
@receiver(post_delete, sender=PytitionUser)
def post_delete_user(sender, instance, *args, **kwargs):
if instance.user: # just in case user is not specified
instance.user.delete()
class TemplateOwnership(models.Model):
user = models.ForeignKey(PytitionUser, blank=True, null=True, on_delete=models.CASCADE)
organization = models.ForeignKey(Organization, blank=True, null=True, on_delete=models.CASCADE)
template = models.ForeignKey(PetitionTemplate, to_field='id', on_delete=models.CASCADE)
def clean(self):
if self.user is None and self.organization is None:
raise ValidationError(_("The template needs to be owned by a User or an Organization."
"It cannot hang around alone by itself."))
#class Meta:
# unique_together = (("user", "template"), ("organization", "template"))
| en | 0.848554 | # let's search for slug collisions # Now confirm the signature corresponding to this hash # If there is no slug, ugly url # This petition is owned by an Organization # This petition is owned by a PytitionUser # This is a BUG! # invalidating other signatures from same email # just in case user is not specified #class Meta: # unique_together = (("user", "template"), ("organization", "template")) | 2.057399 | 2 |
bin/socialhistory.py | JohnShullTopDev/generating-traning-data-for-healthcare-machine-learningcare- | 1 | 8323 | import csv
from testdata import SOCIALHISTORY_FILE
from testdata import rndDate
from patient import Patient
SMOKINGCODES = {
'428041000124106': 'Current some day smoker',
'266919005' : 'Never smoker',
'449868002' : 'Current every day smoker',
'266927001' : 'Unknown if ever smoked',
'8517006' : 'Former smoker'
}
class SocialHistory(object):
"""Create instances of SocialHistory; also maintains socialHistory by patient id"""
socialHistories = {} # Dictionary of socialHistory by patient ID
@classmethod
def load(cls):
"""Loads patient SocialHistory"""
# Loop through socialHistories and build patient socialHistory lists:
histories = csv.reader(open(SOCIALHISTORY_FILE, 'U'), dialect='excel-tab')
header = next(histories)
for history in histories:
cls(dict(zip(header, history))) # Create a socialHistory instance
def __init__(self, p):
self.pid = p['PID']
self.id = p['ID']
self.smokingStatusCode = p['SMOKINGSTATUSCODE']
self.smokingStatusText = SMOKINGCODES[self.smokingStatusCode]
# Append socialHistory to the patient's socialHistory list:
if self.pid in self.__class__.socialHistories:
            raise RuntimeError("Found >1 socialHistory for a patient")
else:
self.__class__.socialHistories[self.pid] = self
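    # Serialize this record as a FHIR Observation (smoking status) wrapped in a transaction bundle entry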
def toJSON(self, prefix=""):
if prefix:
prefix += "-"
patient = Patient.mpi[self.pid]
return {
"request": {
"method": "PUT",
"url": "Observation/" + prefix + "smokingstatus-" + self.id
},
"resource": {
"id": prefix + "smokingstatus-" + self.id,
"resourceType": "Observation",
"status": "final",
"identifier": [
{
"use" : "official",
"system": "http://www.bmc.nl/zorgportal/identifiers/observations",
"value" : prefix + self.id
}
],
"text": {
"status": "generated",
"div": '<div xmlns="http://www.w3.org/1999/xhtml">' +
'Tobacco smoking status: %s</div>'%self.smokingStatusText
},
"performer": [
{
"reference": "Practitioner/" + prefix + "Practitioner-" + patient.gp
}
],
"effectiveDateTime": rndDate(2016).isoformat(),
"code": {
"coding": [
{
"system" : "http://loinc.org",
"code" : "72166-2",
"display": "Tobacco smoking status"
}
],
"text": "Tobacco smoking status"
},
"subject": {
"reference": "Patient/" + prefix + self.pid
},
"category": [
{
"coding": [
{
"system" : "http://hl7.org/fhir/observation-category",
"code" : "social-history",
"display": "Social History"
}
],
"text": "Social History"
}
],
"valueCodeableConcept": {
"coding": [
{
"system" : "http://snomed.info/sct",
"code" : self.smokingStatusCode,
"display": self.smokingStatusText
}
],
"text": self.smokingStatusText
}
}
}
| en | 0.694716 | Create instances of SocialHistory; also maintains socialHistory by patient id # Dictionary of socialHistory by patient ID Loads patient SocialHistory # Loop through socialHistories and build patient socialHistory lists: # Create a socialHistory instance # Append socialHistory to the patient's socialHistory list: | 3.061729 | 3 |
Python X/Dictionaries in python.py | nirobio/puzzles | 0 | 8324 | {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# dictionaries, look-up tables & key-value pairs\n",
"# d = {} OR d = dict()\n",
"# e.g. d = {\"George\": 24, \"Tom\": 32}\n",
"\n",
"d = {}\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"d[\"George\"] = 24"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"d[\"Tom\"] = 32\n",
"d[\"Jenny\"] = 16"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'George': 24, 'Tom': 32, 'Jenny': 16}\n"
]
}
],
"source": [
"print(d)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'Jenny' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-0bdfff196d23>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mJenny\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'Jenny' is not defined"
]
}
],
"source": [
"print(d[Jenny])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"32\n"
]
}
],
"source": [
"print(d[\"Tom\"])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"d[\"Jenny\"] = 20"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"20\n"
]
}
],
"source": [
"print(d[\"Jenny\"])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# keys are strings or numbers \n",
"\n",
"d[10] = 100"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"100\n"
]
}
],
"source": [
"print(d[10])"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# how to iterate over key-value pairs"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"key:\n",
"George\n",
"value:\n",
"24\n",
"\n",
"key:\n",
"Tom\n",
"value:\n",
"32\n",
"\n",
"key:\n",
"Jenny\n",
"value:\n",
"20\n",
"\n",
"key:\n",
"10\n",
"value:\n",
"100\n",
"\n"
]
}
],
"source": [
" for key, value in d.items():\n",
" print(\"key:\")\n",
" print(key)\n",
" print(\"value:\")\n",
" print(value)\n",
" print(\"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| none | 1 | 1.947433 | 2 |
|
lib/spack/spack/test/cache_fetch.py | LiamBindle/spack | 2,360 | 8325 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
from llnl.util.filesystem import mkdirp, touch
import spack.config
from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError
from spack.stage import Stage
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch_missing_cache(tmpdir, _fetch_method):
"""Ensure raise a missing cache file."""
testpath = str(tmpdir)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url='file:///not-a-real-cache-file')
with Stage(fetcher, path=testpath):
with pytest.raises(NoCacheError, match=r'No cache'):
fetcher.fetch()
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch(tmpdir, _fetch_method):
"""Ensure a fetch after expanding is effectively a no-op."""
testpath = str(tmpdir)
cache = os.path.join(testpath, 'cache.tar.gz')
touch(cache)
url = 'file:///{0}'.format(cache)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url=url)
with Stage(fetcher, path=testpath) as stage:
source_path = stage.source_path
mkdirp(source_path)
fetcher.fetch()
en | 0.71959 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Ensure a missing cache file raises an error. Ensure a fetch after expanding is effectively a no-op. | 2.01561 | 2 |
temp_range_sql.py | hanhanwu/Hanhan-Spark-Python | 45 | 8326 | <reponame>hanhanwu/Hanhan-Spark-Python
__author__ = 'hanhanw'
import sys
from pyspark import SparkConf, SparkContext
from pyspark.sql.context import SQLContext
from pyspark.sql.types import StructType, StructField, StringType, DoubleType
conf = SparkConf().setAppName("temp range sql")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
assert sc.version >= '1.5.1'
inputs1 = sys.argv[1]
output = sys.argv[2]
def get_range(recordings):
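    # 1) self-join TMAX and TMIN observations on StationID and DateTime to get each day's range,
    # 2) find the maximum range per DateTime, 3) join back to recover the station(s) behind it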
recordings.registerTempTable('Recordings')
dfrange = sqlContext.sql("""
SELECT r1.DateTime, r1.StationID, (r1.DataValue-r2.DataValue) AS Range FROM
(SELECT StationID, DateTime, Observation, DataValue FROM Recordings
WHERE Observation='TMAX') r1
JOIN
(SELECT StationID, DateTime, Observation, DataValue FROM Recordings
WHERE Observation='TMIN') r2
ON (r1.StationID = r2.StationID AND r1.DateTime = r2.DateTime)
""")
dfrange.registerTempTable('RangeTable')
df_maxrange = sqlContext.sql("""
SELECT DateTime, MAX(Range) AS MaxRange FROM RangeTable
GROUP BY DateTime
""")
df_maxrange.registerTempTable('MaxRange')
df_result = sqlContext.sql("""
SELECT t1.DateTime as DateTime, t1.StationID as StationID, t2.MaxRange as MaxRange FROM
RangeTable t1
JOIN MaxRange t2
ON (t1.DateTime = t2.DateTime AND t1.Range = t2.MaxRange)
""")
return df_result
def main():
temp_schema = StructType([
StructField('StationID', StringType(), False),
StructField('DateTime', StringType(), False),
StructField('Observation', StringType(), False),
StructField('DataValue', DoubleType(), False),
StructField('MFlag', StringType(), True),
StructField('QFlag', StringType(), True),
StructField('SFlag', StringType(), True),
StructField('OBSTime', StringType(), True),
])
df = sqlContext.read.format('com.databricks.spark.csv').options(header='false').load(inputs1, schema=temp_schema)
df = df.filter(df.QFlag == '')
dfrange = get_range(df)
result = dfrange.rdd.map(lambda r: str(r.DateTime)+' '+str(r.StationID)+' '+str(r.MaxRange))
outdata = result.sortBy(lambda r: r[0]).coalesce(1)
outdata.saveAsTextFile(output)
if __name__ == "__main__":
main()
| __author__ = 'hanhanw'
import sys
from pyspark import SparkConf, SparkContext
from pyspark.sql.context import SQLContext
from pyspark.sql.types import StructType, StructField, StringType, DoubleType
conf = SparkConf().setAppName("temp range sql")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
assert sc.version >= '1.5.1'
inputs1 = sys.argv[1]
output = sys.argv[2]
def get_range(recordings):
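    # 1) self-join TMAX and TMIN observations on StationID and DateTime to get each day's range,
    # 2) find the maximum range per DateTime, 3) join back to recover the station(s) behind it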
recordings.registerTempTable('Recordings')
dfrange = sqlContext.sql("""
SELECT r1.DateTime, r1.StationID, (r1.DataValue-r2.DataValue) AS Range FROM
(SELECT StationID, DateTime, Observation, DataValue FROM Recordings
WHERE Observation='TMAX') r1
JOIN
(SELECT StationID, DateTime, Observation, DataValue FROM Recordings
WHERE Observation='TMIN') r2
ON (r1.StationID = r2.StationID AND r1.DateTime = r2.DateTime)
""")
dfrange.registerTempTable('RangeTable')
df_maxrange = sqlContext.sql("""
SELECT DateTime, MAX(Range) AS MaxRange FROM RangeTable
GROUP BY DateTime
""")
df_maxrange.registerTempTable('MaxRange')
df_result = sqlContext.sql("""
SELECT t1.DateTime as DateTime, t1.StationID as StationID, t2.MaxRange as MaxRange FROM
RangeTable t1
JOIN MaxRange t2
ON (t1.DateTime = t2.DateTime AND t1.Range = t2.MaxRange)
""")
return df_result
def main():
temp_schema = StructType([
StructField('StationID', StringType(), False),
StructField('DateTime', StringType(), False),
StructField('Observation', StringType(), False),
StructField('DataValue', DoubleType(), False),
StructField('MFlag', StringType(), True),
StructField('QFlag', StringType(), True),
StructField('SFlag', StringType(), True),
StructField('OBSTime', StringType(), True),
])
df = sqlContext.read.format('com.databricks.spark.csv').options(header='false').load(inputs1, schema=temp_schema)
df = df.filter(df.QFlag == '')
dfrange = get_range(df)
result = dfrange.rdd.map(lambda r: str(r.DateTime)+' '+str(r.StationID)+' '+str(r.MaxRange))
outdata = result.sortBy(lambda r: r[0]).coalesce(1)
outdata.saveAsTextFile(output)
if __name__ == "__main__":
main() | en | 0.595405 | SELECT r1.DateTime, r1.StationID, (r1.DataValue-r2.DataValue) AS Range FROM (SELECT StationID, DateTime, Observation, DataValue FROM Recordings WHERE Observation='TMAX') r1 JOIN (SELECT StationID, DateTime, Observation, DataValue FROM Recordings WHERE Observation='TMIN') r2 ON (r1.StationID = r2.StationID AND r1.DateTime = r2.DateTime) SELECT DateTime, MAX(Range) AS MaxRange FROM RangeTable GROUP BY DateTime SELECT t1.DateTime as DateTime, t1.StationID as StationID, t2.MaxRange as MaxRange FROM RangeTable t1 JOIN MaxRange t2 ON (t1.DateTime = t2.DateTime AND t1.Range = t2.MaxRange) | 2.667005 | 3 |
container/pyf/graphqltypes/Event.py | Pompino/react-components-23KB | 2 | 8327 | from typing_extensions import Required
#from sqlalchemy.sql.sqltypes import Boolean
from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int
from models.EventsRelated.EventModel import EventModel
from graphqltypes.Utils import extractSession
class EventType(ObjectType):
id = ID()
name = String()
lastchange = DateTime()
externalId = String()
users = List('graphqltypes.User.UserType')
def resolve_users(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.users
groups = List('graphqltypes.Group.GroupType')
    def resolve_groups(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.groups
rooms = List('graphqltypes.Room.RoomType')
def resolve_rooms(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.rooms
| from typing_extensions import Required
#from sqlalchemy.sql.sqltypes import Boolean
from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int
from models.EventsRelated.EventModel import EventModel
from graphqltypes.Utils import extractSession
class EventType(ObjectType):
id = ID()
name = String()
lastchange = DateTime()
externalId = String()
users = List('graphqltypes.User.UserType')
def resolve_users(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.users
groups = List('graphqltypes.Group.GroupType')
    def resolve_groups(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.groups
rooms = List('graphqltypes.Room.RoomType')
def resolve_rooms(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.rooms
| en | 0.150175 | #from sqlalchemy.sql.sqltypes import Boolean | 2.173708 | 2 |
desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py | kokosing/hue | 3 | 8328 | <filename>desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.styles.colors import Color, BLACK, WHITE
from openpyxl.utils.units import (
pixels_to_EMU,
EMU_to_pixels,
short_color,
)
from openpyxl.compat import deprecated
from openpyxl.xml.functions import Element, SubElement, tostring
from openpyxl.xml.constants import (
DRAWING_NS,
SHEET_DRAWING_NS,
CHART_NS,
CHART_DRAWING_NS,
PKG_REL_NS
)
from openpyxl.compat.strings import safe_string
class Shape(object):
""" a drawing inside a chart
    coordinates are specified by the user in the axis units
"""
MARGIN_LEFT = 6 + 13 + 1
MARGIN_BOTTOM = 17 + 11
FONT_WIDTH = 7
FONT_HEIGHT = 8
ROUND_RECT = 'roundRect'
RECT = 'rect'
# other shapes to define :
'''
"line"
"lineInv"
"triangle"
"rtTriangle"
"diamond"
"parallelogram"
"trapezoid"
"nonIsoscelesTrapezoid"
"pentagon"
"hexagon"
"heptagon"
"octagon"
"decagon"
"dodecagon"
"star4"
"star5"
"star6"
"star7"
"star8"
"star10"
"star12"
"star16"
"star24"
"star32"
"roundRect"
"round1Rect"
"round2SameRect"
"round2DiagRect"
"snipRoundRect"
"snip1Rect"
"snip2SameRect"
"snip2DiagRect"
"plaque"
"ellipse"
"teardrop"
"homePlate"
"chevron"
"pieWedge"
"pie"
"blockArc"
"donut"
"noSmoking"
"rightArrow"
"leftArrow"
"upArrow"
"downArrow"
"stripedRightArrow"
"notchedRightArrow"
"bentUpArrow"
"leftRightArrow"
"upDownArrow"
"leftUpArrow"
"leftRightUpArrow"
"quadArrow"
"leftArrowCallout"
"rightArrowCallout"
"upArrowCallout"
"downArrowCallout"
"leftRightArrowCallout"
"upDownArrowCallout"
"quadArrowCallout"
"bentArrow"
"uturnArrow"
"circularArrow"
"leftCircularArrow"
"leftRightCircularArrow"
"curvedRightArrow"
"curvedLeftArrow"
"curvedUpArrow"
"curvedDownArrow"
"swooshArrow"
"cube"
"can"
"lightningBolt"
"heart"
"sun"
"moon"
"smileyFace"
"irregularSeal1"
"irregularSeal2"
"foldedCorner"
"bevel"
"frame"
"halfFrame"
"corner"
"diagStripe"
"chord"
"arc"
"leftBracket"
"rightBracket"
"leftBrace"
"rightBrace"
"bracketPair"
"bracePair"
"straightConnector1"
"bentConnector2"
"bentConnector3"
"bentConnector4"
"bentConnector5"
"curvedConnector2"
"curvedConnector3"
"curvedConnector4"
"curvedConnector5"
"callout1"
"callout2"
"callout3"
"accentCallout1"
"accentCallout2"
"accentCallout3"
"borderCallout1"
"borderCallout2"
"borderCallout3"
"accentBorderCallout1"
"accentBorderCallout2"
"accentBorderCallout3"
"wedgeRectCallout"
"wedgeRoundRectCallout"
"wedgeEllipseCallout"
"cloudCallout"
"cloud"
"ribbon"
"ribbon2"
"ellipseRibbon"
"ellipseRibbon2"
"leftRightRibbon"
"verticalScroll"
"horizontalScroll"
"wave"
"doubleWave"
"plus"
"flowChartProcess"
"flowChartDecision"
"flowChartInputOutput"
"flowChartPredefinedProcess"
"flowChartInternalStorage"
"flowChartDocument"
"flowChartMultidocument"
"flowChartTerminator"
"flowChartPreparation"
"flowChartManualInput"
"flowChartManualOperation"
"flowChartConnector"
"flowChartPunchedCard"
"flowChartPunchedTape"
"flowChartSummingJunction"
"flowChartOr"
"flowChartCollate"
"flowChartSort"
"flowChartExtract"
"flowChartMerge"
"flowChartOfflineStorage"
"flowChartOnlineStorage"
"flowChartMagneticTape"
"flowChartMagneticDisk"
"flowChartMagneticDrum"
"flowChartDisplay"
"flowChartDelay"
"flowChartAlternateProcess"
"flowChartOffpageConnector"
"actionButtonBlank"
"actionButtonHome"
"actionButtonHelp"
"actionButtonInformation"
"actionButtonForwardNext"
"actionButtonBackPrevious"
"actionButtonEnd"
"actionButtonBeginning"
"actionButtonReturn"
"actionButtonDocument"
"actionButtonSound"
"actionButtonMovie"
"gear6"
"gear9"
"funnel"
"mathPlus"
"mathMinus"
"mathMultiply"
"mathDivide"
"mathEqual"
"mathNotEqual"
"cornerTabs"
"squareTabs"
"plaqueTabs"
"chartX"
"chartStar"
"chartPlus"
'''
@deprecated("Chart Drawings need a complete rewrite")
def __init__(self,
chart,
coordinates=((0, 0), (1, 1)),
text=None,
scheme="accent1"):
self.chart = chart
self.coordinates = coordinates # in axis units
self.text = text
self.scheme = scheme
self.style = Shape.RECT
self.border_width = 0
self.border_color = BLACK # "F3B3C5"
self.color = WHITE
self.text_color = BLACK
@property
def border_color(self):
return self._border_color
@border_color.setter
def border_color(self, color):
self._border_color = short_color(color)
@property
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = short_color(color)
@property
def text_color(self):
return self._text_color
@text_color.setter
def text_color(self, color):
self._text_color = short_color(color)
@property
def border_width(self):
return self._border_width
@border_width.setter
def border_width(self, w):
self._border_width = w
@property
def coordinates(self):
"""Return coordindates in axis units"""
return self._coordinates
@coordinates.setter
def coordinates(self, coords):
""" set shape coordinates in percentages (left, top, right, bottom)
"""
# this needs refactoring to reflect changes in charts
self.axis_coordinates = coords
(x1, y1), (x2, y2) = coords # bottom left, top right
drawing_width = pixels_to_EMU(self.chart.drawing.width)
drawing_height = pixels_to_EMU(self.chart.drawing.height)
plot_width = drawing_width * self.chart.width
plot_height = drawing_height * self.chart.height
margin_left = self.chart._get_margin_left() * drawing_width
xunit = plot_width / self.chart.get_x_units()
margin_top = self.chart._get_margin_top() * drawing_height
yunit = self.chart.get_y_units()
x_start = (margin_left + (float(x1) * xunit)) / drawing_width
y_start = ((margin_top
+ plot_height
- (float(y1) * yunit))
/ drawing_height)
x_end = (margin_left + (float(x2) * xunit)) / drawing_width
y_end = ((margin_top
+ plot_height
- (float(y2) * yunit))
/ drawing_height)
# allow user to specify y's in whatever order
        # excel expects y_end to be lower
if y_end < y_start:
y_end, y_start = y_start, y_end
self._coordinates = (
self._norm_pct(x_start), self._norm_pct(y_start),
self._norm_pct(x_end), self._norm_pct(y_end)
)
@staticmethod
def _norm_pct(pct):
""" force shapes to appear by truncating too large sizes """
if pct > 1:
return 1
elif pct < 0:
return 0
return pct
class ShapeWriter(object):
""" one file per shape """
def __init__(self, shapes):
self._shapes = shapes
def write(self, shape_id):
root = Element('{%s}userShapes' % CHART_NS)
for shape in self._shapes:
anchor = SubElement(root, '{%s}relSizeAnchor' % CHART_DRAWING_NS)
xstart, ystart, xend, yend = shape.coordinates
_from = SubElement(anchor, '{%s}from' % CHART_DRAWING_NS)
SubElement(_from, '{%s}x' % CHART_DRAWING_NS).text = str(xstart)
SubElement(_from, '{%s}y' % CHART_DRAWING_NS).text = str(ystart)
_to = SubElement(anchor, '{%s}to' % CHART_DRAWING_NS)
SubElement(_to, '{%s}x' % CHART_DRAWING_NS).text = str(xend)
SubElement(_to, '{%s}y' % CHART_DRAWING_NS).text = str(yend)
sp = SubElement(anchor, '{%s}sp' % CHART_DRAWING_NS, {'macro':'', 'textlink':''})
nvspr = SubElement(sp, '{%s}nvSpPr' % CHART_DRAWING_NS)
SubElement(nvspr, '{%s}cNvPr' % CHART_DRAWING_NS, {'id':str(shape_id), 'name':'shape %s' % shape_id})
SubElement(nvspr, '{%s}cNvSpPr' % CHART_DRAWING_NS)
sppr = SubElement(sp, '{%s}spPr' % CHART_DRAWING_NS)
frm = SubElement(sppr, '{%s}xfrm' % DRAWING_NS,)
# no transformation
SubElement(frm, '{%s}off' % DRAWING_NS, {'x':'0', 'y':'0'})
SubElement(frm, '{%s}ext' % DRAWING_NS, {'cx':'0', 'cy':'0'})
prstgeom = SubElement(sppr, '{%s}prstGeom' % DRAWING_NS, {'prst':str(shape.style)})
SubElement(prstgeom, '{%s}avLst' % DRAWING_NS)
fill = SubElement(sppr, '{%s}solidFill' % DRAWING_NS, )
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.color})
border = SubElement(sppr, '{%s}ln' % DRAWING_NS, {'w':str(shape._border_width)})
sf = SubElement(border, '{%s}solidFill' % DRAWING_NS)
SubElement(sf, '{%s}srgbClr' % DRAWING_NS, {'val':shape.border_color})
self._write_style(sp)
self._write_text(sp, shape)
shape_id += 1
return tostring(root)
def _write_text(self, node, shape):
""" write text in the shape """
tx_body = SubElement(node, '{%s}txBody' % CHART_DRAWING_NS)
SubElement(tx_body, '{%s}bodyPr' % DRAWING_NS, {'vertOverflow':'clip'})
SubElement(tx_body, '{%s}lstStyle' % DRAWING_NS)
p = SubElement(tx_body, '{%s}p' % DRAWING_NS)
if shape.text:
r = SubElement(p, '{%s}r' % DRAWING_NS)
rpr = SubElement(r, '{%s}rPr' % DRAWING_NS, {'lang':'en-US'})
fill = SubElement(rpr, '{%s}solidFill' % DRAWING_NS)
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.text_color})
SubElement(r, '{%s}t' % DRAWING_NS).text = shape.text
else:
SubElement(p, '{%s}endParaRPr' % DRAWING_NS, {'lang':'en-US'})
def _write_style(self, node):
""" write style theme """
style = SubElement(node, '{%s}style' % CHART_DRAWING_NS)
ln_ref = SubElement(style, '{%s}lnRef' % DRAWING_NS, {'idx':'2'})
scheme_clr = SubElement(ln_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
SubElement(scheme_clr, '{%s}shade' % DRAWING_NS, {'val':'50000'})
fill_ref = SubElement(style, '{%s}fillRef' % DRAWING_NS, {'idx':'1'})
SubElement(fill_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
effect_ref = SubElement(style, '{%s}effectRef' % DRAWING_NS, {'idx':'0'})
SubElement(effect_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
font_ref = SubElement(style, '{%s}fontRef' % DRAWING_NS, {'idx':'minor'})
SubElement(font_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'lt1'})
| <filename>desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/shape.py
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.styles.colors import Color, BLACK, WHITE
from openpyxl.utils.units import (
pixels_to_EMU,
EMU_to_pixels,
short_color,
)
from openpyxl.compat import deprecated
from openpyxl.xml.functions import Element, SubElement, tostring
from openpyxl.xml.constants import (
DRAWING_NS,
SHEET_DRAWING_NS,
CHART_NS,
CHART_DRAWING_NS,
PKG_REL_NS
)
from openpyxl.compat.strings import safe_string
class Shape(object):
""" a drawing inside a chart
    coordinates are specified by the user in the axis units
"""
MARGIN_LEFT = 6 + 13 + 1
MARGIN_BOTTOM = 17 + 11
FONT_WIDTH = 7
FONT_HEIGHT = 8
ROUND_RECT = 'roundRect'
RECT = 'rect'
# other shapes to define :
'''
"line"
"lineInv"
"triangle"
"rtTriangle"
"diamond"
"parallelogram"
"trapezoid"
"nonIsoscelesTrapezoid"
"pentagon"
"hexagon"
"heptagon"
"octagon"
"decagon"
"dodecagon"
"star4"
"star5"
"star6"
"star7"
"star8"
"star10"
"star12"
"star16"
"star24"
"star32"
"roundRect"
"round1Rect"
"round2SameRect"
"round2DiagRect"
"snipRoundRect"
"snip1Rect"
"snip2SameRect"
"snip2DiagRect"
"plaque"
"ellipse"
"teardrop"
"homePlate"
"chevron"
"pieWedge"
"pie"
"blockArc"
"donut"
"noSmoking"
"rightArrow"
"leftArrow"
"upArrow"
"downArrow"
"stripedRightArrow"
"notchedRightArrow"
"bentUpArrow"
"leftRightArrow"
"upDownArrow"
"leftUpArrow"
"leftRightUpArrow"
"quadArrow"
"leftArrowCallout"
"rightArrowCallout"
"upArrowCallout"
"downArrowCallout"
"leftRightArrowCallout"
"upDownArrowCallout"
"quadArrowCallout"
"bentArrow"
"uturnArrow"
"circularArrow"
"leftCircularArrow"
"leftRightCircularArrow"
"curvedRightArrow"
"curvedLeftArrow"
"curvedUpArrow"
"curvedDownArrow"
"swooshArrow"
"cube"
"can"
"lightningBolt"
"heart"
"sun"
"moon"
"smileyFace"
"irregularSeal1"
"irregularSeal2"
"foldedCorner"
"bevel"
"frame"
"halfFrame"
"corner"
"diagStripe"
"chord"
"arc"
"leftBracket"
"rightBracket"
"leftBrace"
"rightBrace"
"bracketPair"
"bracePair"
"straightConnector1"
"bentConnector2"
"bentConnector3"
"bentConnector4"
"bentConnector5"
"curvedConnector2"
"curvedConnector3"
"curvedConnector4"
"curvedConnector5"
"callout1"
"callout2"
"callout3"
"accentCallout1"
"accentCallout2"
"accentCallout3"
"borderCallout1"
"borderCallout2"
"borderCallout3"
"accentBorderCallout1"
"accentBorderCallout2"
"accentBorderCallout3"
"wedgeRectCallout"
"wedgeRoundRectCallout"
"wedgeEllipseCallout"
"cloudCallout"
"cloud"
"ribbon"
"ribbon2"
"ellipseRibbon"
"ellipseRibbon2"
"leftRightRibbon"
"verticalScroll"
"horizontalScroll"
"wave"
"doubleWave"
"plus"
"flowChartProcess"
"flowChartDecision"
"flowChartInputOutput"
"flowChartPredefinedProcess"
"flowChartInternalStorage"
"flowChartDocument"
"flowChartMultidocument"
"flowChartTerminator"
"flowChartPreparation"
"flowChartManualInput"
"flowChartManualOperation"
"flowChartConnector"
"flowChartPunchedCard"
"flowChartPunchedTape"
"flowChartSummingJunction"
"flowChartOr"
"flowChartCollate"
"flowChartSort"
"flowChartExtract"
"flowChartMerge"
"flowChartOfflineStorage"
"flowChartOnlineStorage"
"flowChartMagneticTape"
"flowChartMagneticDisk"
"flowChartMagneticDrum"
"flowChartDisplay"
"flowChartDelay"
"flowChartAlternateProcess"
"flowChartOffpageConnector"
"actionButtonBlank"
"actionButtonHome"
"actionButtonHelp"
"actionButtonInformation"
"actionButtonForwardNext"
"actionButtonBackPrevious"
"actionButtonEnd"
"actionButtonBeginning"
"actionButtonReturn"
"actionButtonDocument"
"actionButtonSound"
"actionButtonMovie"
"gear6"
"gear9"
"funnel"
"mathPlus"
"mathMinus"
"mathMultiply"
"mathDivide"
"mathEqual"
"mathNotEqual"
"cornerTabs"
"squareTabs"
"plaqueTabs"
"chartX"
"chartStar"
"chartPlus"
'''
@deprecated("Chart Drawings need a complete rewrite")
def __init__(self,
chart,
coordinates=((0, 0), (1, 1)),
text=None,
scheme="accent1"):
self.chart = chart
self.coordinates = coordinates # in axis units
self.text = text
self.scheme = scheme
self.style = Shape.RECT
self.border_width = 0
self.border_color = BLACK # "F3B3C5"
self.color = WHITE
self.text_color = BLACK
@property
def border_color(self):
return self._border_color
@border_color.setter
def border_color(self, color):
self._border_color = short_color(color)
@property
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = short_color(color)
@property
def text_color(self):
return self._text_color
@text_color.setter
def text_color(self, color):
self._text_color = short_color(color)
@property
def border_width(self):
return self._border_width
@border_width.setter
def border_width(self, w):
self._border_width = w
@property
def coordinates(self):
"""Return coordindates in axis units"""
return self._coordinates
@coordinates.setter
def coordinates(self, coords):
""" set shape coordinates in percentages (left, top, right, bottom)
"""
# this needs refactoring to reflect changes in charts
self.axis_coordinates = coords
(x1, y1), (x2, y2) = coords # bottom left, top right
drawing_width = pixels_to_EMU(self.chart.drawing.width)
drawing_height = pixels_to_EMU(self.chart.drawing.height)
plot_width = drawing_width * self.chart.width
plot_height = drawing_height * self.chart.height
margin_left = self.chart._get_margin_left() * drawing_width
xunit = plot_width / self.chart.get_x_units()
margin_top = self.chart._get_margin_top() * drawing_height
yunit = self.chart.get_y_units()
x_start = (margin_left + (float(x1) * xunit)) / drawing_width
y_start = ((margin_top
+ plot_height
- (float(y1) * yunit))
/ drawing_height)
x_end = (margin_left + (float(x2) * xunit)) / drawing_width
y_end = ((margin_top
+ plot_height
- (float(y2) * yunit))
/ drawing_height)
# allow user to specify y's in whatever order
        # excel expects y_end to be lower
if y_end < y_start:
y_end, y_start = y_start, y_end
self._coordinates = (
self._norm_pct(x_start), self._norm_pct(y_start),
self._norm_pct(x_end), self._norm_pct(y_end)
)
@staticmethod
def _norm_pct(pct):
""" force shapes to appear by truncating too large sizes """
if pct > 1:
return 1
elif pct < 0:
return 0
return pct
class ShapeWriter(object):
""" one file per shape """
def __init__(self, shapes):
self._shapes = shapes
def write(self, shape_id):
root = Element('{%s}userShapes' % CHART_NS)
for shape in self._shapes:
anchor = SubElement(root, '{%s}relSizeAnchor' % CHART_DRAWING_NS)
xstart, ystart, xend, yend = shape.coordinates
_from = SubElement(anchor, '{%s}from' % CHART_DRAWING_NS)
SubElement(_from, '{%s}x' % CHART_DRAWING_NS).text = str(xstart)
SubElement(_from, '{%s}y' % CHART_DRAWING_NS).text = str(ystart)
_to = SubElement(anchor, '{%s}to' % CHART_DRAWING_NS)
SubElement(_to, '{%s}x' % CHART_DRAWING_NS).text = str(xend)
SubElement(_to, '{%s}y' % CHART_DRAWING_NS).text = str(yend)
sp = SubElement(anchor, '{%s}sp' % CHART_DRAWING_NS, {'macro':'', 'textlink':''})
nvspr = SubElement(sp, '{%s}nvSpPr' % CHART_DRAWING_NS)
SubElement(nvspr, '{%s}cNvPr' % CHART_DRAWING_NS, {'id':str(shape_id), 'name':'shape %s' % shape_id})
SubElement(nvspr, '{%s}cNvSpPr' % CHART_DRAWING_NS)
sppr = SubElement(sp, '{%s}spPr' % CHART_DRAWING_NS)
frm = SubElement(sppr, '{%s}xfrm' % DRAWING_NS,)
# no transformation
SubElement(frm, '{%s}off' % DRAWING_NS, {'x':'0', 'y':'0'})
SubElement(frm, '{%s}ext' % DRAWING_NS, {'cx':'0', 'cy':'0'})
prstgeom = SubElement(sppr, '{%s}prstGeom' % DRAWING_NS, {'prst':str(shape.style)})
SubElement(prstgeom, '{%s}avLst' % DRAWING_NS)
fill = SubElement(sppr, '{%s}solidFill' % DRAWING_NS, )
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.color})
border = SubElement(sppr, '{%s}ln' % DRAWING_NS, {'w':str(shape._border_width)})
sf = SubElement(border, '{%s}solidFill' % DRAWING_NS)
SubElement(sf, '{%s}srgbClr' % DRAWING_NS, {'val':shape.border_color})
self._write_style(sp)
self._write_text(sp, shape)
shape_id += 1
return tostring(root)
def _write_text(self, node, shape):
""" write text in the shape """
tx_body = SubElement(node, '{%s}txBody' % CHART_DRAWING_NS)
SubElement(tx_body, '{%s}bodyPr' % DRAWING_NS, {'vertOverflow':'clip'})
SubElement(tx_body, '{%s}lstStyle' % DRAWING_NS)
p = SubElement(tx_body, '{%s}p' % DRAWING_NS)
if shape.text:
r = SubElement(p, '{%s}r' % DRAWING_NS)
rpr = SubElement(r, '{%s}rPr' % DRAWING_NS, {'lang':'en-US'})
fill = SubElement(rpr, '{%s}solidFill' % DRAWING_NS)
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.text_color})
SubElement(r, '{%s}t' % DRAWING_NS).text = shape.text
else:
SubElement(p, '{%s}endParaRPr' % DRAWING_NS, {'lang':'en-US'})
def _write_style(self, node):
""" write style theme """
style = SubElement(node, '{%s}style' % CHART_DRAWING_NS)
ln_ref = SubElement(style, '{%s}lnRef' % DRAWING_NS, {'idx':'2'})
scheme_clr = SubElement(ln_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
SubElement(scheme_clr, '{%s}shade' % DRAWING_NS, {'val':'50000'})
fill_ref = SubElement(style, '{%s}fillRef' % DRAWING_NS, {'idx':'1'})
SubElement(fill_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
effect_ref = SubElement(style, '{%s}effectRef' % DRAWING_NS, {'idx':'0'})
SubElement(effect_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
font_ref = SubElement(style, '{%s}fontRef' % DRAWING_NS, {'idx':'minor'})
SubElement(font_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'lt1'})
| en | 0.60914 | # Copyright (c) 2010-2015 openpyxl a drawing inside a chart coordiantes are specified by the user in the axis units # other shapes to define : "line" "lineInv" "triangle" "rtTriangle" "diamond" "parallelogram" "trapezoid" "nonIsoscelesTrapezoid" "pentagon" "hexagon" "heptagon" "octagon" "decagon" "dodecagon" "star4" "star5" "star6" "star7" "star8" "star10" "star12" "star16" "star24" "star32" "roundRect" "round1Rect" "round2SameRect" "round2DiagRect" "snipRoundRect" "snip1Rect" "snip2SameRect" "snip2DiagRect" "plaque" "ellipse" "teardrop" "homePlate" "chevron" "pieWedge" "pie" "blockArc" "donut" "noSmoking" "rightArrow" "leftArrow" "upArrow" "downArrow" "stripedRightArrow" "notchedRightArrow" "bentUpArrow" "leftRightArrow" "upDownArrow" "leftUpArrow" "leftRightUpArrow" "quadArrow" "leftArrowCallout" "rightArrowCallout" "upArrowCallout" "downArrowCallout" "leftRightArrowCallout" "upDownArrowCallout" "quadArrowCallout" "bentArrow" "uturnArrow" "circularArrow" "leftCircularArrow" "leftRightCircularArrow" "curvedRightArrow" "curvedLeftArrow" "curvedUpArrow" "curvedDownArrow" "swooshArrow" "cube" "can" "lightningBolt" "heart" "sun" "moon" "smileyFace" "irregularSeal1" "irregularSeal2" "foldedCorner" "bevel" "frame" "halfFrame" "corner" "diagStripe" "chord" "arc" "leftBracket" "rightBracket" "leftBrace" "rightBrace" "bracketPair" "bracePair" "straightConnector1" "bentConnector2" "bentConnector3" "bentConnector4" "bentConnector5" "curvedConnector2" "curvedConnector3" "curvedConnector4" "curvedConnector5" "callout1" "callout2" "callout3" "accentCallout1" "accentCallout2" "accentCallout3" "borderCallout1" "borderCallout2" "borderCallout3" "accentBorderCallout1" "accentBorderCallout2" "accentBorderCallout3" "wedgeRectCallout" "wedgeRoundRectCallout" "wedgeEllipseCallout" "cloudCallout" "cloud" "ribbon" "ribbon2" "ellipseRibbon" "ellipseRibbon2" "leftRightRibbon" "verticalScroll" "horizontalScroll" "wave" "doubleWave" "plus" "flowChartProcess" "flowChartDecision" "flowChartInputOutput" "flowChartPredefinedProcess" "flowChartInternalStorage" "flowChartDocument" "flowChartMultidocument" "flowChartTerminator" "flowChartPreparation" "flowChartManualInput" "flowChartManualOperation" "flowChartConnector" "flowChartPunchedCard" "flowChartPunchedTape" "flowChartSummingJunction" "flowChartOr" "flowChartCollate" "flowChartSort" "flowChartExtract" "flowChartMerge" "flowChartOfflineStorage" "flowChartOnlineStorage" "flowChartMagneticTape" "flowChartMagneticDisk" "flowChartMagneticDrum" "flowChartDisplay" "flowChartDelay" "flowChartAlternateProcess" "flowChartOffpageConnector" "actionButtonBlank" "actionButtonHome" "actionButtonHelp" "actionButtonInformation" "actionButtonForwardNext" "actionButtonBackPrevious" "actionButtonEnd" "actionButtonBeginning" "actionButtonReturn" "actionButtonDocument" "actionButtonSound" "actionButtonMovie" "gear6" "gear9" "funnel" "mathPlus" "mathMinus" "mathMultiply" "mathDivide" "mathEqual" "mathNotEqual" "cornerTabs" "squareTabs" "plaqueTabs" "chartX" "chartStar" "chartPlus" # in axis units # "F3B3C5" Return coordindates in axis units set shape coordinates in percentages (left, top, right, bottom) # this needs refactoring to reflect changes in charts # bottom left, top right # allow user to specify y's in whatever order # excel expect y_end to be lower force shapes to appear by truncating too large sizes one file per shape # no transformation write text in the shape write style theme | 2.299718 | 2 |
scripts/VCF/FILTER/subset_vcf.py | elowy01/igsr_analysis | 3 | 8329 |
from VcfQC import VcfQC
from ReseqTrackDB import File
from ReseqTrackDB import ReseqTrackDB
import argparse
import os
import logging
import datetime
#get command line arguments
parser = argparse.ArgumentParser(description='Script to subset a VCF by excluding the variants within the regions defined by a BED file')
'''
Reseqtrack DB connection parameters
'''
parser.add_argument('--hostname', type=str, required=True, help='Hostname for ReseqTrack DB' )
parser.add_argument('--username', type=str, required=True, help='User for ReseqTrack DB' )
parser.add_argument('--port', type=int, required=True, help='Port number in the ReseqTrack DB' )
parser.add_argument('--pwd', type=str, help='PWD for the ReseqTrack DB' )
parser.add_argument('--db', type=str, required=True, help='DB name in the ReseqTrack DB' )
parser.add_argument('--type', type=str, required=True, help='Type of the new VCF file' )
parser.add_argument('--vcftools_folder', type=str, required=True, help='Folder containing the VCFtools binary' )
parser.add_argument('--bgzip_folder', type=str, required=True, help='Folder containing the bgzip binary')
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--bed', type=str, required=True, help='BED file containing the coordinates to exclude' )
parser.add_argument('--outsuffix', type=str, required=True, help='Suffix for vcf output file. i.e. no_cms or no_offtarget' )
parser.add_argument('--outdir', type=str, required=True, help='Directory used to put the output files.' )
args = parser.parse_args()
if __name__ == '__main__':
if os.path.isdir(args.outdir) == False:
raise Exception("Output dir does not exist: %s"%args.outdir)
hostname=args.hostname
username=args.username
db=args.db
port=args.port
pwd=args.pwd
    reseqdb = ReseqTrackDB(host=hostname,user=username,port=port,pwd=pwd,db=db)
file=reseqdb.fetch_file_by_filename(args.filename)
#constructing the out filename
now = datetime.datetime.now().strftime('%Y%m%d')
bits= os.path.basename(file.name).split('.')
outprefix=bits[0]+"."+bits[1]+"."+args.outsuffix+"."+now
log_filename="subset_vcf_%s.log"% outprefix
logger = logging.getLogger("subset_vcf")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(log_filename)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add handler to logger object
logger.addHandler(fh)
logger.info("Program started")
vcfQC = VcfQC(vcf=file.path,bgzip_folder=args.bgzip_folder,vcftools_folder=args.vcftools_folder)
vcffile=vcfQC.subset_vcf(bed=args.bed,outprefix=outprefix,outdir=args.outdir,create_index=True)
f=File(path=vcffile,type=args.type,host_id=1,withdrawn=0)
f.store(reseqdb,do_md5=True)
logger.info("Done!.")
|
from VcfQC import VcfQC
from ReseqTrackDB import File
from ReseqTrackDB import ReseqTrackDB
import argparse
import os
import logging
import datetime
#get command line arguments
parser = argparse.ArgumentParser(description='Script to subset a VCF by excluding the variants within the regions defined by a BED file')
'''
Reseqtrack DB connection parameters
'''
parser.add_argument('--hostname', type=str, required=True, help='Hostname for ReseqTrack DB' )
parser.add_argument('--username', type=str, required=True, help='User for ReseqTrack DB' )
parser.add_argument('--port', type=int, required=True, help='Port number in the ReseqTrack DB' )
parser.add_argument('--pwd', type=str, help='PWD for the ReseqTrack DB' )
parser.add_argument('--db', type=str, required=True, help='DB name in the ReseqTrack DB' )
parser.add_argument('--type', type=str, required=True, help='Type of the new VCF file' )
parser.add_argument('--vcftools_folder', type=str, required=True, help='Folder containing the VCFtools binary' )
parser.add_argument('--bgzip_folder', type=str, required=True, help='Folder containing the bgzip binary')
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--bed', type=str, required=True, help='BED file containing the coordinates to exclude' )
parser.add_argument('--outsuffix', type=str, required=True, help='Suffix for vcf output file. i.e. no_cms or no_offtarget' )
parser.add_argument('--outdir', type=str, required=True, help='Directory used to put the output files.' )
args = parser.parse_args()
if __name__ == '__main__':
if os.path.isdir(args.outdir) == False:
raise Exception("Output dir does not exist: %s"%args.outdir)
hostname=args.hostname
username=args.username
db=args.db
port=args.port
pwd=args.pwd
    reseqdb = ReseqTrackDB(host=hostname,user=username,port=port,pwd=pwd,db=db)
file=reseqdb.fetch_file_by_filename(args.filename)
#constructing the out filename
now = datetime.datetime.now().strftime('%Y%m%d')
bits= os.path.basename(file.name).split('.')
outprefix=bits[0]+"."+bits[1]+"."+args.outsuffix+"."+now
log_filename="subset_vcf_%s.log"% outprefix
logger = logging.getLogger("subset_vcf")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(log_filename)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add handler to logger object
logger.addHandler(fh)
logger.info("Program started")
vcfQC = VcfQC(vcf=file.path,bgzip_folder=args.bgzip_folder,vcftools_folder=args.vcftools_folder)
vcffile=vcfQC.subset_vcf(bed=args.bed,outprefix=outprefix,outdir=args.outdir,create_index=True)
f=File(path=vcffile,type=args.type,host_id=1,withdrawn=0)
f.store(reseqdb,do_md5=True)
logger.info("Done!.")
| en | 0.394076 | #get command line arguments Reseqtrack DB connection parameters #constructing the out filename # create the logging file handler # add handler to logger object | 2.635062 | 3 |
controllers/restart.py | Acidburn0zzz/helloworld | 0 | 8330 | <reponame>Acidburn0zzz/helloworld
import os
from base import BaseHandler
class RestartHandler(BaseHandler):
def get(self):
if not self.authenticate(superuser=True):
return
os.system('touch ' + self.application.settings["restart_path"])
self.redirect(self.get_argument("next"))
| import os
from base import BaseHandler
class RestartHandler(BaseHandler):
def get(self):
if not self.authenticate(superuser=True):
return
os.system('touch ' + self.application.settings["restart_path"])
self.redirect(self.get_argument("next")) | none | 1 | 2.321021 | 2 |
|
nova/tests/unit/conductor/tasks/test_migrate.py | badock/nova-tidb | 0 | 8331 | <gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor.tasks import migrate
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit.conductor.test_conductor import FakeContext
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
class MigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(MigrationTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
self.flavor = fake_flavor.fake_flavor_obj(self.context)
self.flavor.extra_specs = {'extra_specs': 'fake'}
inst = fake_instance.fake_db_instance(image_ref='image_ref',
instance_type=self.flavor)
inst_object = objects.Instance(
flavor=self.flavor,
numa_topology=None,
pci_requests=None,
system_metadata={'image_hw_disk_bus': 'scsi'})
self.instance = objects.Instance._from_db_object(
self.context, inst_object, inst, [])
self.request_spec = objects.RequestSpec(image=objects.ImageMeta())
self.hosts = [dict(host='host1', nodename=None, limits={})]
self.filter_properties = {'limits': {}, 'retry': {'num_attempts': 1,
'hosts': [['host1', None]]}}
self.reservations = []
self.clean_shutdown = True
def _generate_task(self):
return migrate.MigrationTask(self.context, self.instance, self.flavor,
self.request_spec, self.reservations,
self.clean_shutdown,
compute_rpcapi.ComputeAPI(),
scheduler_client.SchedulerClient())
@mock.patch.object(objects.RequestSpec, 'from_components')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
@mock.patch.object(objects.Quotas, 'from_reservations')
def test_execute(self, quotas_mock, prep_resize_mock,
sel_dest_mock, sig_mock, request_spec_from_components):
sel_dest_mock.return_value = self.hosts
task = self._generate_task()
request_spec_from_components.return_value = self.request_spec
legacy_request_spec = self.request_spec.to_legacy_request_spec_dict()
task.execute()
quotas_mock.assert_called_once_with(self.context, self.reservations,
instance=self.instance)
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
self.filter_properties)
task.scheduler_client.select_destinations.assert_called_once_with(
self.context, self.request_spec)
prep_resize_mock.assert_called_once_with(
self.context, self.instance, legacy_request_spec['image'],
self.flavor, self.hosts[0]['host'], self.reservations,
request_spec=legacy_request_spec,
filter_properties=self.filter_properties,
node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown)
self.assertFalse(quotas_mock.return_value.rollback.called)
def test_rollback(self):
task = self._generate_task()
task.quotas = mock.MagicMock()
task.rollback()
task.quotas.rollback.assert_called_once_with()
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor.tasks import migrate
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit.conductor.test_conductor import FakeContext
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
class MigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(MigrationTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
self.flavor = fake_flavor.fake_flavor_obj(self.context)
self.flavor.extra_specs = {'extra_specs': 'fake'}
inst = fake_instance.fake_db_instance(image_ref='image_ref',
instance_type=self.flavor)
inst_object = objects.Instance(
flavor=self.flavor,
numa_topology=None,
pci_requests=None,
system_metadata={'image_hw_disk_bus': 'scsi'})
self.instance = objects.Instance._from_db_object(
self.context, inst_object, inst, [])
self.request_spec = objects.RequestSpec(image=objects.ImageMeta())
self.hosts = [dict(host='host1', nodename=None, limits={})]
self.filter_properties = {'limits': {}, 'retry': {'num_attempts': 1,
'hosts': [['host1', None]]}}
self.reservations = []
self.clean_shutdown = True
def _generate_task(self):
return migrate.MigrationTask(self.context, self.instance, self.flavor,
self.request_spec, self.reservations,
self.clean_shutdown,
compute_rpcapi.ComputeAPI(),
scheduler_client.SchedulerClient())
@mock.patch.object(objects.RequestSpec, 'from_components')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
@mock.patch.object(objects.Quotas, 'from_reservations')
def test_execute(self, quotas_mock, prep_resize_mock,
sel_dest_mock, sig_mock, request_spec_from_components):
sel_dest_mock.return_value = self.hosts
task = self._generate_task()
request_spec_from_components.return_value = self.request_spec
legacy_request_spec = self.request_spec.to_legacy_request_spec_dict()
task.execute()
quotas_mock.assert_called_once_with(self.context, self.reservations,
instance=self.instance)
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
self.filter_properties)
task.scheduler_client.select_destinations.assert_called_once_with(
self.context, self.request_spec)
prep_resize_mock.assert_called_once_with(
self.context, self.instance, legacy_request_spec['image'],
self.flavor, self.hosts[0]['host'], self.reservations,
request_spec=legacy_request_spec,
filter_properties=self.filter_properties,
node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown)
self.assertFalse(quotas_mock.return_value.rollback.called)
def test_rollback(self):
task = self._generate_task()
task.quotas = mock.MagicMock()
task.rollback()
task.quotas.rollback.assert_called_once_with() | en | 0.859654 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.615812 | 2 |
CH7_GitCmdAndCtrl/modules/environment.py | maxmac12/BlackHatPython | 0 | 8332 | import os
def run(**kwargs):
print("[*] In environment module.")
return str(os.environ) | import os
def run(**kwargs):
print("[*] In environment module.")
return str(os.environ) | none | 1 | 1.895633 | 2 |
|
diskcatalog/core/views.py | rywjhzd/Cataloging-and-Visualizing-Cradles-of-Planet-Formation | 0 | 8333 | from django.shortcuts import render
from .models import Disk
import os
def index(request):
context = {}
disk_list = Disk.objects.all()
context['disk_list'] = disk_list
return render(request, 'index.html', context)
#def index(request):
# module_dir = os.path.dirname(__file__)
# file_path = os.path.join(module_dir, 'data.txt')
# disk_list = open(file_path , 'r')
# data = data_file.read()
# context = {'disk_list': data}
# return render(request, 'index.html', context)
| from django.shortcuts import render
from .models import Disk
import os
def index(request):
context = {}
disk_list = Disk.objects.all()
context['disk_list'] = disk_list
return render(request, 'index.html', context)
#def index(request):
# module_dir = os.path.dirname(__file__)
# file_path = os.path.join(module_dir, 'data.txt')
# disk_list = open(file_path , 'r')
# data = data_file.read()
# context = {'disk_list': data}
# return render(request, 'index.html', context)
| en | 0.113488 | #def index(request): # module_dir = os.path.dirname(__file__) # file_path = os.path.join(module_dir, 'data.txt') # disk_list = open(file_path , 'r') # data = data_file.read() # context = {'disk_list': data} # return render(request, 'index.html', context) | 2.02282 | 2 |
misc/python/materialize/checks/insert_select.py | guswynn/materialize | 0 | 8334 | <reponame>guswynn/materialize<filename>misc/python/materialize/checks/insert_select.py
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from textwrap import dedent
from typing import List
from materialize.checks.actions import Testdrive
from materialize.checks.checks import Check
class InsertSelect(Check):
def initialize(self) -> Testdrive:
return Testdrive(
dedent(
"""
> CREATE TABLE insert_select_destination (f1 STRING);
> CREATE TABLE insert_select_source_table (f1 STRING);
> INSERT INTO insert_select_source_table SELECT 'T1' || generate_series FROM generate_series(1,10000);
"""
)
)
def manipulate(self) -> List[Testdrive]:
return [
Testdrive(dedent(s))
for s in [
"""
> INSERT INTO insert_select_source_table SELECT 'T2' || generate_series FROM generate_series(1, 10000);
> INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table;
""",
"""
> INSERT INTO insert_select_source_table SELECT 'T3' || generate_series FROM generate_series(1, 10000);
> INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table;
""",
]
]
def validate(self) -> Testdrive:
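        # the destination gets two full copies of the source table, so T1/T2 rows appear
        # 20000 times (10000 distinct), while T3, which exists only for the second copy, appears 10000 times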
return Testdrive(
dedent(
"""
> SELECT LEFT(f1, 2), COUNT(*), COUNT(DISTINCT f1) FROM insert_select_destination GROUP BY LEFT(f1, 2);
T1 20000 10000
T2 20000 10000
T3 10000 10000
"""
)
)
| # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from textwrap import dedent
from typing import List
from materialize.checks.actions import Testdrive
from materialize.checks.checks import Check
class InsertSelect(Check):
def initialize(self) -> Testdrive:
return Testdrive(
dedent(
"""
> CREATE TABLE insert_select_destination (f1 STRING);
> CREATE TABLE insert_select_source_table (f1 STRING);
> INSERT INTO insert_select_source_table SELECT 'T1' || generate_series FROM generate_series(1,10000);
"""
)
)
def manipulate(self) -> List[Testdrive]:
return [
Testdrive(dedent(s))
for s in [
"""
> INSERT INTO insert_select_source_table SELECT 'T2' || generate_series FROM generate_series(1, 10000);
> INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table;
""",
"""
> INSERT INTO insert_select_source_table SELECT 'T3' || generate_series FROM generate_series(1, 10000);
> INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table;
""",
]
]
def validate(self) -> Testdrive:
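        # the destination gets two full copies of the source table, so T1/T2 rows appear
        # 20000 times (10000 distinct), while T3, which exists only for the second copy, appears 10000 times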
return Testdrive(
dedent(
"""
> SELECT LEFT(f1, 2), COUNT(*), COUNT(DISTINCT f1) FROM insert_select_destination GROUP BY LEFT(f1, 2);
T1 20000 10000
T2 20000 10000
T3 10000 10000
"""
)
) | en | 0.660477 | # Copyright Materialize, Inc. and contributors. All rights reserved. # # Use of this software is governed by the Business Source License # included in the LICENSE file at the root of this repository. # # As of the Change Date specified in that file, in accordance with # the Business Source License, use of this software will be governed # by the Apache License, Version 2.0. > CREATE TABLE insert_select_destination (f1 STRING); > CREATE TABLE insert_select_source_table (f1 STRING); > INSERT INTO insert_select_source_table SELECT 'T1' || generate_series FROM generate_series(1,10000); > INSERT INTO insert_select_source_table SELECT 'T2' || generate_series FROM generate_series(1, 10000); > INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table; > INSERT INTO insert_select_source_table SELECT 'T3' || generate_series FROM generate_series(1, 10000); > INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table; > SELECT LEFT(f1, 2), COUNT(*), COUNT(DISTINCT f1) FROM insert_select_destination GROUP BY LEFT(f1, 2); T1 20000 10000 T2 20000 10000 T3 10000 10000 | 2.126746 | 2 |
mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py | garlicbutter/Jonathan-Tom | 2 | 8335 | <filename>mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py
import numpy as np
import mujoco_py as mj
from mujoco_py_renderer import SimulationError, XMLError, MujocoPyRenderer
from mujoco_py import (MjSim, load_model_from_xml,functions,
load_model_from_path, MjSimState,
ignore_mujoco_warnings,
load_model_from_mjb)
from matplotlib import pyplot as plt
import time
xml = """
<mujoco model="example">
<compiler coordinate="global"/>
<default>
<geom rgba=".8 .6 .4 1"/>
</default>
<asset>
<texture type="skybox" builtin="gradient" rgb1="1 1 1" rgb2=".6 .8 1"
width="256" height="256"/>
</asset>
<worldbody>
<light pos="0 1 1" dir="0 -1 -1" diffuse="1 1 1"/>
<geom name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="10 10 10" type="plane"/>
<body>
<site name="world" size="0.1" pos="0 0 0" />
<geom name="first_pole" type="capsule" fromto="0 0 0 0 0 0.5" size="0.04"/>
<joint name='a' type="hinge" pos="0 0 0" axis="0 0 1" />
<body name="second_pole">
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0 0 0.5 0.5 0 0.5" size="0.04" name="second_pole"/>
<joint name='b' type="hinge" pos="0 0 0.5" axis="0 1 0"/>
<body name='third_pole'>
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0.5 0 0.5 1 0 0.5" size="0.04" name="third_pole"/>
<joint name='c' type="hinge" pos="0.5 0 0.5" axis="0 1 0"/>
<site name="target" size="0.1" pos="1 0 0.5" />
<body name="mass">
<inertial pos="1 0 0.5" mass="1e-2" diaginertia="1e-008 1e-008 1e-008" />
<geom type="sphere" pos="1 0 0.5" size="0.2" name="mass"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
<motor joint="c"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
viewer = MujocoPyRenderer(sim)
sim.reset()
# After reset jacobians are all zeros
sim.forward()
target_jacp = np.zeros(3 * sim.model.nv)
target_jacr= np.zeros(3 * sim.model.nv)
F=np.array([0,0,-9.81*1e-2,0,0,0]).T
#np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
#sim.forward()
K_diag=2000
C_diag=100
A_diag=1e-3
K=np.identity(3)*K_diag
C=np.identity(3)*C_diag
A=np.identity(3)*A_diag
#K_diag=0.3
#C_diag=0.05
for i in range(3):
K[i, i]=K_diag
C[i,i]=C_diag
A[i, i] = A_diag
x_intial=sim.data.site_xpos[1]
print(x_intial)
x_desired=np.array([0,1,0.3])
v_intial=sim.data.site_xvelp[1]
v_desired=np.array([0,0,0])
a_desired=np.array([0,0,0])
a_intial=np.array([0,0,0])
dt=sim.model.opt.timestep
#sim.data.get_site_jacp('target', jacp=target_jacp)
# Should be unchanged after steps (zero action)
graph=[]
for _ in range(100000):
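    # task-space impedance law: F = K*(x_desired - x) + C*(v_desired - v) + A*(a_desired - a)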
F[:3]=np.dot(K,x_desired-x_intial)+np.dot(C,v_desired-v_intial)+np.dot(A,a_desired-a_intial)
H = np.zeros(sim.model.nv* sim.model.nv)
functions.mj_fullM(sim.model, H, sim.data.qM)
sim.data.get_site_jacp('target', jacp=target_jacp)
sim.data.get_site_jacr('target', jacr=target_jacr)
J_L = target_jacp.reshape((3, sim.model.nv))
J_A = target_jacr.reshape((3, sim.model.nv))
J = np.concatenate((J_L, J_A), axis=0)
H_L =np.dot(np.linalg.pinv(J_L.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J_L)))
H_all=np.dot(np.linalg.pinv(J.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J)))
#F_a=np.dot(A,0.3-sim.data.qacc)
#action = np.dot(J_L.T, np.dot(H_L, F[:3]))+sim.data.qfrc_bias
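    # map the Cartesian force into joint torques through J_L^T (scaled by the mass matrix H)
    # and add qfrc_bias to compensate for gravity/Coriolis forces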
action = sim.data.qfrc_bias+np.dot(H.reshape(3,3),np.dot(J_L.T,F[:3]))
#print(action)
#action = np.dot(J.T, F)
sim.data.ctrl[:] = action
sim.step()
sim.forward()
#print(np.max(action))
#print(sim.data.qacc)
viewer.render()
x_intial = sim.data.site_xpos[1]
a_intial=(v_intial-sim.data.site_xvelp[1])/dt
print(a_intial)
v_intial = sim.data.site_xvelp[1]
normal=np.linalg.norm(x_intial-x_desired)
#print(normal)
if normal<0.1:
print("in")
if x_desired[0]==0:
x_desired = np.array([-1, 0, 0.5])
elif x_desired[0]==1:
x_desired = np.array([0, 1, 0.3])
elif x_desired[0] == -1:
x_desired = np.array([1, 0, 0.5])
graph.append(np.abs(x_intial-x_desired))
# sim.forward()
print("the desired is {} and the intial is{}".format(x_desired,x_intial))
plt.plot(graph)
plt.show() | <filename>mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py
import numpy as np
import mujoco_py as mj
from mujoco_py_renderer import SimulationError, XMLError, MujocoPyRenderer
from mujoco_py import (MjSim, load_model_from_xml,functions,
load_model_from_path, MjSimState,
ignore_mujoco_warnings,
load_model_from_mjb)
from matplotlib import pyplot as plt
import time
xml = """
<mujoco model="example">
<compiler coordinate="global"/>
<default>
<geom rgba=".8 .6 .4 1"/>
</default>
<asset>
<texture type="skybox" builtin="gradient" rgb1="1 1 1" rgb2=".6 .8 1"
width="256" height="256"/>
</asset>
<worldbody>
<light pos="0 1 1" dir="0 -1 -1" diffuse="1 1 1"/>
<geom name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="10 10 10" type="plane"/>
<body>
<site name="world" size="0.1" pos="0 0 0" />
<geom name="first_pole" type="capsule" fromto="0 0 0 0 0 0.5" size="0.04"/>
<joint name='a' type="hinge" pos="0 0 0" axis="0 0 1" />
<body name="second_pole">
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0 0 0.5 0.5 0 0.5" size="0.04" name="second_pole"/>
<joint name='b' type="hinge" pos="0 0 0.5" axis="0 1 0"/>
<body name='third_pole'>
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0.5 0 0.5 1 0 0.5" size="0.04" name="third_pole"/>
<joint name='c' type="hinge" pos="0.5 0 0.5" axis="0 1 0"/>
<site name="target" size="0.1" pos="1 0 0.5" />
<body name="mass">
<inertial pos="1 0 0.5" mass="1e-2" diaginertia="1e-008 1e-008 1e-008" />
<geom type="sphere" pos="1 0 0.5" size="0.2" name="mass"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
<motor joint="c"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
viewer = MujocoPyRenderer(sim)
sim.reset()
# After reset jacobians are all zeros
sim.forward()
target_jacp = np.zeros(3 * sim.model.nv)
target_jacr= np.zeros(3 * sim.model.nv)
F=np.array([0,0,-9.81*1e-2,0,0,0]).T
#np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
#sim.forward()
K_diag=2000
C_diag=100
A_diag=1e-3
K=np.identity(3)*K_diag
C=np.identity(3)*C_diag
A=np.identity(3)*A_diag
#K_diag=0.3
#C_diag=0.05
for i in range(3):
K[i, i]=K_diag
C[i,i]=C_diag
A[i, i] = A_diag
x_intial=sim.data.site_xpos[1]
print(x_intial)
x_desired=np.array([0,1,0.3])
v_intial=sim.data.site_xvelp[1]
v_desired=np.array([0,0,0])
a_desired=np.array([0,0,0])
a_intial=np.array([0,0,0])
dt=sim.model.opt.timestep
#sim.data.get_site_jacp('target', jacp=target_jacp)
# Should be unchanged after steps (zero action)
graph=[]
for _ in range(100000):
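    # task-space impedance law: F = K*(x_desired - x) + C*(v_desired - v) + A*(a_desired - a)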
F[:3]=np.dot(K,x_desired-x_intial)+np.dot(C,v_desired-v_intial)+np.dot(A,a_desired-a_intial)
H = np.zeros(sim.model.nv* sim.model.nv)
functions.mj_fullM(sim.model, H, sim.data.qM)
sim.data.get_site_jacp('target', jacp=target_jacp)
sim.data.get_site_jacr('target', jacr=target_jacr)
J_L = target_jacp.reshape((3, sim.model.nv))
J_A = target_jacr.reshape((3, sim.model.nv))
J = np.concatenate((J_L, J_A), axis=0)
H_L =np.dot(np.linalg.pinv(J_L.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J_L)))
H_all=np.dot(np.linalg.pinv(J.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J)))
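    # H_L / H_all are pseudoinverse-based estimates of the task-space inertia
    # (pinv(J.T) @ M @ pinv(J)); only the joint-space mass matrix H is actually used
    # in the `action` computed below.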
#F_a=np.dot(A,0.3-sim.data.qacc)
#action = np.dot(J_L.T, np.dot(H_L, F[:3]))+sim.data.qfrc_bias
action = sim.data.qfrc_bias+np.dot(H.reshape(3,3),np.dot(J_L.T,F[:3]))
#print(action)
#action = np.dot(J.T, F)
sim.data.ctrl[:] = action
sim.step()
sim.forward()
#print(np.max(action))
#print(sim.data.qacc)
viewer.render()
x_intial = sim.data.site_xpos[1]
a_intial=(v_intial-sim.data.site_xvelp[1])/dt
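    # Finite-difference acceleration estimate from successive site velocities; note the
    # (v_old - v_new)/dt ordering gives the negative of the usual convention.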
print(a_intial)
v_intial = sim.data.site_xvelp[1]
normal=np.linalg.norm(x_intial-x_desired)
#print(normal)
if normal<0.1:
print("in")
if x_desired[0]==0:
x_desired = np.array([-1, 0, 0.5])
elif x_desired[0]==1:
x_desired = np.array([0, 1, 0.3])
elif x_desired[0] == -1:
x_desired = np.array([1, 0, 0.5])
graph.append(np.abs(x_intial-x_desired))
# sim.forward()
print("the desired is {} and the intial is{}".format(x_desired,x_intial))
plt.plot(graph)
plt.show() | en | 0.245874 | <mujoco model="example"> <compiler coordinate="global"/> <default> <geom rgba=".8 .6 .4 1"/> </default> <asset> <texture type="skybox" builtin="gradient" rgb1="1 1 1" rgb2=".6 .8 1" width="256" height="256"/> </asset> <worldbody> <light pos="0 1 1" dir="0 -1 -1" diffuse="1 1 1"/> <geom name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="10 10 10" type="plane"/> <body> <site name="world" size="0.1" pos="0 0 0" /> <geom name="first_pole" type="capsule" fromto="0 0 0 0 0 0.5" size="0.04"/> <joint name='a' type="hinge" pos="0 0 0" axis="0 0 1" /> <body name="second_pole"> <inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" /> <geom type="capsule" fromto="0 0 0.5 0.5 0 0.5" size="0.04" name="second_pole"/> <joint name='b' type="hinge" pos="0 0 0.5" axis="0 1 0"/> <body name='third_pole'> <inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" /> <geom type="capsule" fromto="0.5 0 0.5 1 0 0.5" size="0.04" name="third_pole"/> <joint name='c' type="hinge" pos="0.5 0 0.5" axis="0 1 0"/> <site name="target" size="0.1" pos="1 0 0.5" /> <body name="mass"> <inertial pos="1 0 0.5" mass="1e-2" diaginertia="1e-008 1e-008 1e-008" /> <geom type="sphere" pos="1 0 0.5" size="0.2" name="mass"/> </body> </body> </body> </body> </worldbody> <actuator> <motor joint="a"/> <motor joint="b"/> <motor joint="c"/> </actuator> </mujoco> # After reset jacobians are all zeros #np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv)) # After first forward, jacobians are real #sim.forward() #K_diag=0.3 #C_diag=0.05 #sim.data.get_site_jacp('target', jacp=target_jacp) # Should be unchanged after steps (zero action) #F_a=np.dot(A,0.3-sim.data.qacc) #action = np.dot(J_L.T, np.dot(H_L, F[:3]))+sim.data.qfrc_bias #print(action) #action = np.dot(J.T, F) #print(np.max(action)) #print(sim.data.qacc) #print(normal) # sim.forward() | 2.032383 | 2 |
evaluation/wordpress/pull_docker_images_from_private_registry.py | seveirbian/gear-old | 0 | 8336 | import sys
# package need to be installed, pip install docker
import docker
import time
import yaml
import os
import xlwt
auto = False
private_registry = "192.168.3.11:9999/"
# result
result = [["tag", "finishTime", "size", "data"], ]
class Puller:
def __init__(self, images):
self.images_to_pull = images
def check(self):
# detect whether the file exists, if true, delete it
if os.path.exists("./images_pulled.txt"):
os.remove("./images_pulled.txt")
def pull(self):
self.check()
client = docker.from_env()
# if don't give a tag, then all image under this registry will be pulled
repos = self.images_to_pull[0]["repo"]
for repo in repos:
tags = self.images_to_pull[1][repo]
for tag in tags:
print "start pulling: ", private_registry+repo, ":", tag
# get present time
startTime = time.time()
# get present net data
cnetdata = get_net_data()
# pull images
try:
image_pulled = client.images.pull(repository=private_registry+repo, tag=str(tag))
# print pull time
finishTime = time.time() - startTime
print "finished in " , finishTime, "s"
# get image's size
size = image_pulled.attrs[u'Size'] / 1000000.0
print "image size: ", size
data = get_net_data() - cnetdata
print "pull data: ", data
print "\n"
# record the image and its pulling time
result.append([tag, finishTime, size, data])
except docker.errors.NotFound:
print private_registry+repo + " not found...\n\n"
except docker.errors.ImageNotFound:
print private_registry+repo + " image not fount...\n\n"
if auto != True:
raw_input("Next?")
class Generator:
def __init__(self, profilePath=""):
self.profilePath = profilePath
def generateFromProfile(self):
if self.profilePath == "":
print "Error: profile path is null"
with open(self.profilePath, 'r') as f:
self.images = yaml.load(f, Loader=yaml.FullLoader)
return self.images
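# get_net_data reads /proc/net/dev and returns the cumulative received bytes of the
# enp0s3 interface in MiB; pull traffic is measured as the difference of two readings.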
def get_net_data():
netCard = "/proc/net/dev"
fd = open(netCard, "r")
for line in fd.readlines():
if line.find("enp0s3") >= 0:
field = line.split()
data = float(field[1]) / 1024.0 / 1024.0
fd.close()
return data
if __name__ == "__main__":
if len(sys.argv) == 2:
auto = True
generator = Generator(os.path.split(os.path.realpath(__file__))[0]+"/image_versions.yaml")
images = generator.generateFromProfile()
puller = Puller(images)
puller.pull()
# create a workbook sheet
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("run_time")
for row in range(len(result)):
for column in range(len(result[row])):
sheet.write(row, column, result[row][column])
workbook.save(os.path.split(os.path.realpath(__file__))[0]+"/pull.xls") | import sys
# package need to be installed, pip install docker
import docker
import time
import yaml
import os
import xlwt
auto = False
private_registry = "192.168.3.11:9999/"
# result
result = [["tag", "finishTime", "size", "data"], ]
class Puller:
def __init__(self, images):
self.images_to_pull = images
def check(self):
# detect whether the file exists, if true, delete it
if os.path.exists("./images_pulled.txt"):
os.remove("./images_pulled.txt")
def pull(self):
self.check()
client = docker.from_env()
# if don't give a tag, then all image under this registry will be pulled
repos = self.images_to_pull[0]["repo"]
for repo in repos:
tags = self.images_to_pull[1][repo]
for tag in tags:
print "start pulling: ", private_registry+repo, ":", tag
# get present time
startTime = time.time()
# get present net data
cnetdata = get_net_data()
# pull images
try:
image_pulled = client.images.pull(repository=private_registry+repo, tag=str(tag))
# print pull time
finishTime = time.time() - startTime
print "finished in " , finishTime, "s"
# get image's size
size = image_pulled.attrs[u'Size'] / 1000000.0
print "image size: ", size
data = get_net_data() - cnetdata
print "pull data: ", data
print "\n"
# record the image and its pulling time
result.append([tag, finishTime, size, data])
except docker.errors.NotFound:
print private_registry+repo + " not found...\n\n"
except docker.errors.ImageNotFound:
print private_registry+repo + " image not fount...\n\n"
if auto != True:
raw_input("Next?")
class Generator:
def __init__(self, profilePath=""):
self.profilePath = profilePath
def generateFromProfile(self):
if self.profilePath == "":
print "Error: profile path is null"
with open(self.profilePath, 'r') as f:
self.images = yaml.load(f, Loader=yaml.FullLoader)
return self.images
def get_net_data():
netCard = "/proc/net/dev"
fd = open(netCard, "r")
for line in fd.readlines():
if line.find("enp0s3") >= 0:
field = line.split()
data = float(field[1]) / 1024.0 / 1024.0
fd.close()
return data
if __name__ == "__main__":
if len(sys.argv) == 2:
auto = True
generator = Generator(os.path.split(os.path.realpath(__file__))[0]+"/image_versions.yaml")
images = generator.generateFromProfile()
puller = Puller(images)
puller.pull()
# create a workbook sheet
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("run_time")
for row in range(len(result)):
for column in range(len(result[row])):
sheet.write(row, column, result[row][column])
workbook.save(os.path.split(os.path.realpath(__file__))[0]+"/pull.xls") | en | 0.807653 | # package need to be installed, pip install docker # result # detect whether the file exists, if true, delete it # if don't give a tag, then all image under this registry will be pulled # get present time # get present net data # pull images # print pull time # get image's size # record the image and its pulling time # create a workbook sheet | 2.470561 | 2 |
jiminy/envs/vnc_wog.py | sibeshkar/jiminy | 3 | 8337 | <reponame>sibeshkar/jiminy<filename>jiminy/envs/vnc_wog.py
from jiminy.envs import vnc_env
from jiminy.spaces import VNCActionSpace
class WorldOfGooEnv(vnc_env.VNCEnv):
def __init__(self):
super(WorldOfGooEnv, self).__init__()
# TODO: set action space screen shape to match
# HACK: empty keys list fails for some weird reason, give it an 'a'
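        # Effectively mouse-only control: buttonmasks=[1] exposes a single pointer button,
        # and the lone dummy key only works around the empty-keys issue noted above.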
self.action_space = VNCActionSpace(keys=['a'], buttonmasks=[1])
| from jiminy.envs import vnc_env
from jiminy.spaces import VNCActionSpace
class WorldOfGooEnv(vnc_env.VNCEnv):
def __init__(self):
super(WorldOfGooEnv, self).__init__()
# TODO: set action space screen shape to match
# HACK: empty keys list fails for some weird reason, give it an 'a'
self.action_space = VNCActionSpace(keys=['a'], buttonmasks=[1]) | en | 0.743451 | # TODO: set action space screen shape to match # HACK: empty keys list fails for some weird reason, give it an 'a' | 2.276347 | 2 |
fedml_api/standalone/federated_sgan/fedssgan_api.py | arj119/FedML | 0 | 8338 | <filename>fedml_api/standalone/federated_sgan/fedssgan_api.py<gh_stars>0
import copy
import logging
import random
from typing import List, Tuple
import numpy as np
import torch
import wandb
from torch.utils.data import ConcatDataset
from fedml_api.standalone.fedavg.my_model_trainer import MyModelTrainer
from fedml_api.standalone.federated_sgan.ac_gan_model_trainer import ACGANModelTrainer
from fedml_api.standalone.federated_sgan.client import FedSSGANClient
from fedml_api.standalone.federated_sgan.model_trainer import FedSSGANModelTrainer
from fedml_api.standalone.utils.HeterogeneousModelBaseTrainerAPI import HeterogeneousModelBaseTrainerAPI
class FedSSGANAPI(HeterogeneousModelBaseTrainerAPI):
def __init__(self, dataset, device, args, adapter_model, client_models: List[Tuple[torch.nn.Module, int]]):
"""
Args:
dataset: Dataset presplit into data loaders
device: Device to run training on
args: Additional args
client_models: List of client models and their frequency participating (assuming a stateful algorithm for simplicity)
"""
super().__init__(dataset, device, args)
self.global_model = MyModelTrainer(adapter_model)
self._setup_clients(self.train_data_local_num_dict, self.train_data_local_dict, self.test_data_local_dict,
client_models)
self._plot_client_training_data_distribution()
def _setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
client_models):
logging.info("############setup_clients (START)#############")
c_idx = 0
for local_model, freq in client_models:
for i in range(freq):
model_trainer = ACGANModelTrainer(
copy.deepcopy(self.global_model.model),
copy.deepcopy(local_model)
)
c = FedSSGANClient(c_idx, train_data_local_dict[c_idx], test_data_local_dict[c_idx],
train_data_local_num_dict[c_idx], self.test_global, self.args, self.device,
model_trainer)
c_idx += 1
self.client_list.append(c)
logging.info("############setup_clients (END)#############")
def train(self):
logging.info('\n###############Pre-Training clients#############\n')
for i, c in enumerate(self.client_list):
            logging.info(f'Pre-training client: {i}')
c.pre_train()
logging.info('###############Pre-Training clients (END)###########\n')
unlabelled_synthesised_data = None
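        # The synthetic-data exchange between clients is currently commented out below, so
        # each round reduces to local ACGAN training plus aggregation of the shared adapter.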
w_global = self.global_model.get_model_params()
for round_idx in range(self.args.comm_round):
logging.info("################Communication round : {}".format(round_idx))
w_locals = []
synthesised_data_locals = []
client_synthesised_data_lens = {'round': round_idx}
client: FedSSGANClient
for idx, client in enumerate(self.client_list):
# Update client synthetic datasets
# client.set_synthetic_dataset(unlabelled_synthesised_data)
# Local round
w = client.train(copy.deepcopy(w_global), round_idx)
# self.logger.info("local weights = " + str(w))
w_locals.append((client.get_sample_number(), copy.deepcopy(w)))
# synthetic_data = client.generate_synthetic_dataset()
# if synthetic_data is not None:
# synthesised_data_locals.append(synthetic_data)
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = len(synthetic_data)
# else:
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = 0
#
# if len(synthesised_data_locals) > 0:
# unlabelled_synthesised_data = ConcatDataset(synthesised_data_locals)
# logging.info(f'\n Synthetic Unlabelled Dataset Size: {len(unlabelled_synthesised_data)}\n')
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = len(unlabelled_synthesised_data)
# else:
# unlabelled_synthesised_data = None
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = 0
# wandb.log(client_synthesised_data_lens)
# update global weights
w_global = self._aggregate(w_locals)
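            # FedAvg-style aggregation: _aggregate comes from the base trainer API and is fed
            # (sample_count, weights) pairs, presumably averaging weighted by sample count.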
self.global_model.set_model_params(w_global)
# test results
# at last round
if round_idx == self.args.comm_round - 1:
self._local_test_on_all_clients(round_idx)
# per {frequency_of_the_test} round
elif round_idx % self.args.frequency_of_the_test == 0:
if self.args.dataset.startswith("stackoverflow"):
self._local_test_on_validation_set(round_idx)
else:
self._local_test_on_all_clients(round_idx)
| <filename>fedml_api/standalone/federated_sgan/fedssgan_api.py<gh_stars>0
import copy
import logging
import random
from typing import List, Tuple
import numpy as np
import torch
import wandb
from torch.utils.data import ConcatDataset
from fedml_api.standalone.fedavg.my_model_trainer import MyModelTrainer
from fedml_api.standalone.federated_sgan.ac_gan_model_trainer import ACGANModelTrainer
from fedml_api.standalone.federated_sgan.client import FedSSGANClient
from fedml_api.standalone.federated_sgan.model_trainer import FedSSGANModelTrainer
from fedml_api.standalone.utils.HeterogeneousModelBaseTrainerAPI import HeterogeneousModelBaseTrainerAPI
class FedSSGANAPI(HeterogeneousModelBaseTrainerAPI):
def __init__(self, dataset, device, args, adapter_model, client_models: List[Tuple[torch.nn.Module, int]]):
"""
Args:
dataset: Dataset presplit into data loaders
device: Device to run training on
args: Additional args
client_models: List of client models and their frequency participating (assuming a stateful algorithm for simplicity)
"""
super().__init__(dataset, device, args)
self.global_model = MyModelTrainer(adapter_model)
self._setup_clients(self.train_data_local_num_dict, self.train_data_local_dict, self.test_data_local_dict,
client_models)
self._plot_client_training_data_distribution()
def _setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
client_models):
logging.info("############setup_clients (START)#############")
c_idx = 0
for local_model, freq in client_models:
for i in range(freq):
model_trainer = ACGANModelTrainer(
copy.deepcopy(self.global_model.model),
copy.deepcopy(local_model)
)
c = FedSSGANClient(c_idx, train_data_local_dict[c_idx], test_data_local_dict[c_idx],
train_data_local_num_dict[c_idx], self.test_global, self.args, self.device,
model_trainer)
c_idx += 1
self.client_list.append(c)
logging.info("############setup_clients (END)#############")
def train(self):
logging.info('\n###############Pre-Training clients#############\n')
for i, c in enumerate(self.client_list):
            logging.info(f'Pre-training client: {i}')
c.pre_train()
logging.info('###############Pre-Training clients (END)###########\n')
unlabelled_synthesised_data = None
w_global = self.global_model.get_model_params()
for round_idx in range(self.args.comm_round):
logging.info("################Communication round : {}".format(round_idx))
w_locals = []
synthesised_data_locals = []
client_synthesised_data_lens = {'round': round_idx}
client: FedSSGANClient
for idx, client in enumerate(self.client_list):
# Update client synthetic datasets
# client.set_synthetic_dataset(unlabelled_synthesised_data)
# Local round
w = client.train(copy.deepcopy(w_global), round_idx)
# self.logger.info("local weights = " + str(w))
w_locals.append((client.get_sample_number(), copy.deepcopy(w)))
# synthetic_data = client.generate_synthetic_dataset()
# if synthetic_data is not None:
# synthesised_data_locals.append(synthetic_data)
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = len(synthetic_data)
# else:
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = 0
#
# if len(synthesised_data_locals) > 0:
# unlabelled_synthesised_data = ConcatDataset(synthesised_data_locals)
# logging.info(f'\n Synthetic Unlabelled Dataset Size: {len(unlabelled_synthesised_data)}\n')
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = len(unlabelled_synthesised_data)
# else:
# unlabelled_synthesised_data = None
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = 0
# wandb.log(client_synthesised_data_lens)
# update global weights
w_global = self._aggregate(w_locals)
self.global_model.set_model_params(w_global)
# test results
# at last round
if round_idx == self.args.comm_round - 1:
self._local_test_on_all_clients(round_idx)
# per {frequency_of_the_test} round
elif round_idx % self.args.frequency_of_the_test == 0:
if self.args.dataset.startswith("stackoverflow"):
self._local_test_on_validation_set(round_idx)
else:
self._local_test_on_all_clients(round_idx)
| en | 0.572174 | Args: dataset: Dataset presplit into data loaders device: Device to run training on args: Additional args client_models: List of client models and their frequency participating (assuming a stateful algorithm for simplicity) ###########setup_clients (START)#############") ###########setup_clients (END)#############") ###############Pre-Training clients#############\n') ##############Pre-Training clients (END)###########\n') ###############Communication round : {}".format(round_idx)) # Update client synthetic datasets # client.set_synthetic_dataset(unlabelled_synthesised_data) # Local round # self.logger.info("local weights = " + str(w)) # synthetic_data = client.generate_synthetic_dataset() # if synthetic_data is not None: # synthesised_data_locals.append(synthetic_data) # client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = len(synthetic_data) # else: # client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = 0 # # if len(synthesised_data_locals) > 0: # unlabelled_synthesised_data = ConcatDataset(synthesised_data_locals) # logging.info(f'\n Synthetic Unlabelled Dataset Size: {len(unlabelled_synthesised_data)}\n') # client_synthesised_data_lens['Total Synthetic Dataset Size'] = len(unlabelled_synthesised_data) # else: # unlabelled_synthesised_data = None # client_synthesised_data_lens['Total Synthetic Dataset Size'] = 0 # wandb.log(client_synthesised_data_lens) # update global weights # test results # at last round # per {frequency_of_the_test} round | 2.082507 | 2 |
pytorch-word2vec-master/csv.py | arjun-sai-krishnan/tamil-morpho-embeddings | 2 | 8339 | <filename>pytorch-word2vec-master/csv.py<gh_stars>1-10
#!/usr/bin/env python3
import argparse
from collections import Counter
import pdb
import pickle
import re
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.multiprocessing as mp
import data_producer
from multiprocessing import set_start_method
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default="", help="training file")
parser.add_argument("--vocab", type=str, default="", help="vocab pickle file")
parser.add_argument("--save", type=str, default="csv.pth.tar", help="saved model filename")
parser.add_argument("--size", type=int, default=300, help="word embedding dimension")
parser.add_argument("--window", type=int, default=5, help="context window size")
parser.add_argument("--sample", type=float, default=1e-5, help="subsample threshold")
parser.add_argument("--negative", type=int, default=10, help="number of negative samples")
parser.add_argument("--delta", type=float, default=0.15, help="create new sense for a type if similarity lower than this value.")
parser.add_argument("--min_count", type=int, default=5, help="minimum frequency of a word")
parser.add_argument("--processes", type=int, default=4, help="number of processes")
parser.add_argument("--num_workers", type=int, default=6, help="number of workers for data processsing")
parser.add_argument("--iter", type=int, default=3, help="number of iterations")
parser.add_argument("--lr", type=float, default=-1.0, help="initial learning rate")
parser.add_argument("--batch_size", type=int, default=100, help="(max) batch size")
parser.add_argument("--cuda", action='store_true', default=False, help="enable cuda")
parser.add_argument("--multi_proto", action='store_true', default=False, help="True: multi-prototype, False:single-prototype")
MAX_SENT_LEN = 1000
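# Training proceeds in three stages (see __main__): stage 1 learns the context
# representation with one sense per word, stage 2 creates new senses non-parametrically
# whenever a context is less similar than --delta to the existing senses, and stage 3
# fine-tunes with the sense inventory frozen.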
# Build the vocabulary.
def file_split(f, delim=' \t\n', bufsize=1024):
prev = ''
while True:
s = f.read(bufsize)
if not s:
break
tokens = re.split('['+delim+']{1,}', s)
if len(tokens) > 1:
yield prev + tokens[0]
prev = tokens[-1]
for x in tokens[1:-1]:
yield x
else:
prev += s
if prev:
yield prev
def build_vocab(args):
vocab = Counter()
word_count = 0
for word in file_split(open(args.train)):
vocab[word] += 1
word_count += 1
if word_count % 10000 == 0:
sys.stdout.write('%d\r' % len(vocab))
freq = {k:v for k,v in vocab.items() if v >= args.min_count}
word_count = sum([freq[k] for k in freq])
word_list = sorted(freq, key=freq.get, reverse=True)
word2idx = {}
for i,w in enumerate(word_list):
word2idx[w] = i
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
return word2idx, word_list, freq
class CSV(nn.Module):
def __init__(self, args):
super(CSV, self).__init__()
self.global_embs = nn.Embedding(args.vocab_size+1, args.size, padding_idx=args.vocab_size, sparse=True)
self.sense_embs = nn.Embedding(args.vocab_size*5, args.size, sparse=True)
self.ctx_weight = torch.nn.Parameter(torch.ones(2*args.window, args.size))
self.word2sense = [ [i] for i in range(args.vocab_size) ]
'''
word2sense = np.zeros((args.vocab_size, 5), dtype='int32')
for i in range(args.vocab_size):
word2sense[i, 0] = i
self.word2sense = torch.nn.Parameter(torch.from_numpy(word2sense).int())
self.word_sense_cnts = torch.nn.Parameter(torch.ones((args.vocab_size,)).int())
'''
self.global_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.sense_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.n_senses = args.vocab_size
self.sense_capacity = args.vocab_size*5
self.batch_size = args.batch_size
self.size = args.size
self.window = args.window
self.negative = args.negative
self.pad_idx = args.vocab_size
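        # Each word starts with exactly one sense (sense id == word id); sense_embs is
        # over-allocated to 5 * vocab_size so new senses can be added in stage 2.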
def get_context_feats(self, ctx_type_indices):
ctx_type_embs = self.global_embs(ctx_type_indices)
return torch.sum(ctx_type_embs * self.ctx_weight, 1).cpu().data.numpy()
def get_possible_sense_embs(self, type_indices, cuda=True):
sense_indices = []
sense2idx = {}
for type_id in type_indices:
for s_id in self.word2sense[type_id]:
if s_id not in sense2idx:
sense2idx[s_id] = len(sense_indices)
sense_indices.append( s_id )
sense_indices = np.array(sense_indices)
if cuda:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices).cuda()))
return sense2idx, sense_embs.cpu().data.numpy()
else:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices)))
return sense2idx, sense_embs.data.numpy()
def forward(self, data):
ctx_type_indices = data[:, 0:2*self.window]
pos_sense_idx = data[:, 2*self.window+1]
neg_sense_indices = data[:, 2*self.window+2:2*self.window+2+self.negative]
neg_mask = data[:, 2*self.window+2+self.negative:].float()
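        # Each row packs 2*window context type ids, one id that appears to be the centre
        # word, the positive sense id, `negative` sampled sense ids and a mask over negatives.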
ctx_type_embs = self.global_embs(ctx_type_indices)
pos_sense_embs = self.sense_embs(pos_sense_idx)
neg_sense_embs = self.sense_embs(neg_sense_indices)
ctx_feats = torch.sum(ctx_type_embs * self.ctx_weight, 1, keepdim=True)
# Neg Log Likelihood
pos_ips = torch.sum(ctx_feats[:,0,:] * pos_sense_embs, 1)
pos_loss = torch.sum( -F.logsigmoid(torch.clamp(pos_ips,max=10,min=-10)))
neg_ips = torch.bmm(neg_sense_embs, ctx_feats.permute(0,2,1))[:,:,0]
neg_loss = torch.sum( -F.logsigmoid(torch.clamp(-neg_ips,max=10,min=-10)) * neg_mask )
return pos_loss + neg_loss
# Initialize model.
def init_net(args):
if args.lr == -1.0:
vars(args)['lr'] = 0.05
return CSV(args)
def save_model(filename, model, args, word2idx):
torch.save({
'word2idx':word2idx,
'args':args,
#'word2sense': model.word2sense,
'n_senses': model.n_senses,
'params': model.state_dict()
}, filename)
def load_model(filename):
checkpoint = torch.load(filename)
word2idx = checkpoint['word2idx']
args = checkpoint['args']
model = CSV(args)
if args.cuda:
model.cuda()
model.global_embs.weight.data = checkpoint['params']['global_embs.weight']
model.sense_embs.weight.data = checkpoint['params']['sense_embs.weight']
model.ctx_weight.data = checkpoint['params']['ctx_weight']
model.word2sense = checkpoint['word2sense']
#model.word2sense.data = checkpoint['params']['word2sense']
#model.word_sense_cnts.data = checkpoint['params']['word_sense_cnts']
model.n_senses = checkpoint['n_senses']
return model, word2idx
# Training
def train_process_sent_producer(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args):
n_proc = 1 if args.stage == 2 else args.processes
N = 1 if args.stage == 2 else args.iter
neg = 0 if args.stage == 2 else args.negative
if args.negative > 0:
table_ptr_val = data_producer.init_unigram_table(word_list, freq, args.train_words)
train_file = open(args.train)
file_pos = args.file_size * p_id // n_proc
train_file.seek(file_pos, 0)
while True:
try:
train_file.read(1)
except UnicodeDecodeError:
file_pos -= 1
train_file.seek(file_pos, 0)
else:
train_file.seek(file_pos, 0)
break
batch_count = 0
batch_placeholder = np.zeros((args.batch_size, 2*args.window+2+2*neg), 'int64')
for it in range(N):
train_file.seek(file_pos, 0)
last_word_cnt = 0
word_cnt = 0
sentence = []
prev = ''
eof = False
while True:
if eof or train_file.tell() > file_pos + args.file_size / n_proc:
break
while True:
s = train_file.read(1)
if not s:
eof = True
break
elif s == ' ' or s == '\t':
if prev in word2idx:
sentence.append(prev)
prev = ''
if len(sentence) >= MAX_SENT_LEN:
break
elif s == '\n':
if prev in word2idx:
sentence.append(prev)
prev = ''
break
else:
prev += s
if len(sentence) > 0:
# subsampling
sent_id = []
if args.sample != 0:
sent_len = len(sentence)
i = 0
while i < sent_len:
word = sentence[i]
f = freq[word] / args.train_words
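                        # word2vec subsampling: keep-probability (sqrt(f/t) + 1) * t/f with t = --sample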
pb = (np.sqrt(f / args.sample) + 1) * args.sample / f;
if pb > np.random.random_sample():
sent_id.append( word2idx[word] )
i += 1
if len(sent_id) < 2:
word_cnt += len(sentence)
sentence.clear()
continue
next_random = (2**24) * np.random.randint(0, 2**24) + np.random.randint(0, 2**24)
chunk = data_producer.cbow_producer(sent_id, len(sent_id), table_ptr_val, args.window,
neg, args.vocab_size, args.batch_size, next_random)
chunk_pos = 0
while chunk_pos < chunk.shape[0]:
remain_space = args.batch_size - batch_count
remain_chunk = chunk.shape[0] - chunk_pos
if remain_chunk < remain_space:
take_from_chunk = remain_chunk
else:
take_from_chunk = remain_space
batch_placeholder[batch_count:batch_count+take_from_chunk, :] = chunk[chunk_pos:chunk_pos+take_from_chunk, :]
batch_count += take_from_chunk
if batch_count == args.batch_size:
data_queue.put(batch_placeholder)
batch_count = 0
chunk_pos += take_from_chunk
word_cnt += len(sentence)
if word_cnt - last_word_cnt > 10000:
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
last_word_cnt = word_cnt
sentence.clear()
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
print(p_id, it, file_pos, train_file.tell(), args.file_size)
if batch_count > 0:
data_queue.put(batch_placeholder[:batch_count,:])
data_queue.put(None)
print(p_id, file_pos, train_file.tell(), args.file_size)
def train_process(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
lr = args.lr
#optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
optimizer = optim.Adagrad(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
#n_iter = 1 if args.stage == 2 else args.iter
n_iter = args.iter
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
# lr anneal & output
if word_count_actual.value - prev_word_cnt > 10000:
#if args.lr_anneal:
# lr = args.lr * (1 - word_count_actual.value / (n_iter * args.train_words))
# if lr < 0.0001 * args.lr:
# lr = 0.0001 * args.lr
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
#sys.stdout.write("\rAlpha: %0.8f, Progess: %0.2f, Words/sec: %f, word_cnt: %d" % (lr, word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.stage == 1:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
elif args.stage == 3:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
#type_ids = chunk[:, 2*args.window+1:2*args.window+2+2*args.negative]
type_ids = chunk[:, 2*args.window+1:2*args.window+2+args.negative]
type_ids = np.reshape(type_ids, (type_ids.shape[0] * type_ids.shape[1]))
sense2idx, sense_embs = model.get_possible_sense_embs(type_ids.tolist())
# get type_idx from chunk, and do sense selection here.
context_feats = model.get_context_feats(data[:, :2*args.window])
chunk = data_producer.select_sense(chunk, context_feats, sense2idx, sense_embs,
model.word2sense, chunk.shape[0], args.size, args.window, args.negative)
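                # select_sense presumably rewrites the batch so that each word is represented by
                # the sense (from model.word2sense) whose embedding best matches the context feature.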
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
t.join()
def train_process_stage2(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
sense_embs = model.sense_embs.weight.data.numpy()
counter_list = np.zeros((model.sense_capacity), dtype='float32')
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
n_iter = 1
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
if word_count_actual.value - prev_word_cnt > 10000:
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
context_feats = model.get_context_feats(data[:, :2*args.window])
# update sense_embs
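            # create_n_update_sense (from the data_producer extension) appears to compare each
            # context feature with the centre word's existing senses and allocate a new sense
            # when the best similarity is below --delta; counter_list tracks how many contexts
            # hit each sense so the accumulated sums can be normalised into means after the loop.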
create_cnt = data_producer.create_n_update_sense(chunk[:, 2*args.window+1], context_feats, sense_embs, model.word2sense, counter_list, chunk.shape[0], args.size, args.delta, model.n_senses)
model.n_senses += create_cnt
#if model.n_senses + args.batch_size > model.sense_capacity:
# new_capacity = model.sense_capacity * 3 // 2
# counter_list = np.concatenate( (counter_list, np.ones((new_capacity - model.sense_capacity),dtype='float32')), axis=0)
# zero = np.zeros((new_capacity - model.sense_capacity, args.size), 'float32')
# sense_embs = np.concatenate((sense_embs, zero), 0)
# model.sense_capacity = new_capacity
# print("\nexapnded sense_embs: %d" % model.n_senses)
t.join()
sense_embs[:model.n_senses, :] = sense_embs[:model.n_senses, :] / counter_list[:model.n_senses, None]
if __name__ == '__main__':
set_start_method('forkserver')
args = parser.parse_args()
print("Starting training using file %s" % args.train)
train_file = open(args.train)
train_file.seek(0, 2)
vars(args)['file_size'] = train_file.tell()
word_count_actual = mp.Value('L', 0)
if args.vocab == '':
word2idx, word_list, freq = build_vocab(args)
else:
with open(args.vocab, 'rb') as f:
word2idx, word_list, freq, pos2idx, dep2id = pickle.load(f)
word_count = sum([freq[k] for k in freq])
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
model = init_net(args)
model.share_memory()
if args.cuda:
model.cuda()
# stage 1, learn robust context representation.
vars(args)['stage'] = 1
print("Stage 1")
vars(args)['lr_anneal'] = True
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
del processes
print("\nStage 1, ", time.monotonic() - args.t_start, " secs ", word_count_actual.value)
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage1.pth.tar'
save_model(filename, model, args, word2idx)
if args.multi_proto:
# stage 2, create new sense in a non-parametric way.
# Freeze model paramters except sense_embs, and use only 1 process to prevent race condition
old_batch_size = vars(args)['batch_size']
model.global_embs.requires_grad = False
model.ctx_weight.requires_grad = False
model.sense_embs = model.sense_embs.cpu()
vars(args)['stage'] = 2
vars(args)['batch_size'] = 5000
print("\nStage 2")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
train_process_stage2(0, word_count_actual, word2idx, word_list, freq, args, model)
if args.cuda:
model.cuda()
print("\nStage 2, ", time.monotonic() - args.t_start, " secs")
print("Current # of senses: %d" % model.n_senses)
pdb.set_trace()
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage2.pth.tar'
save_model(filename, model, args, word2idx)
# stage 3, no more sense creation.
vars(args)['lr'] = args.lr * 0.01
vars(args)['batch_size'] = old_batch_size
model.global_embs.requires_grad = True
model.ctx_weight.requires_grad = True
vars(args)['stage'] = 3
print("\nBegin stage 3")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
print("\nStage 3, ", time.monotonic() - args.t_start, " secs")
# save model
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage3.pth.tar'
save_model(filename, model, args, word2idx)
print("")
| <filename>pytorch-word2vec-master/csv.py<gh_stars>1-10
#!/usr/bin/env python3
import argparse
from collections import Counter
import pdb
import pickle
import re
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.multiprocessing as mp
import data_producer
from multiprocessing import set_start_method
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default="", help="training file")
parser.add_argument("--vocab", type=str, default="", help="vocab pickle file")
parser.add_argument("--save", type=str, default="csv.pth.tar", help="saved model filename")
parser.add_argument("--size", type=int, default=300, help="word embedding dimension")
parser.add_argument("--window", type=int, default=5, help="context window size")
parser.add_argument("--sample", type=float, default=1e-5, help="subsample threshold")
parser.add_argument("--negative", type=int, default=10, help="number of negative samples")
parser.add_argument("--delta", type=float, default=0.15, help="create new sense for a type if similarity lower than this value.")
parser.add_argument("--min_count", type=int, default=5, help="minimum frequency of a word")
parser.add_argument("--processes", type=int, default=4, help="number of processes")
parser.add_argument("--num_workers", type=int, default=6, help="number of workers for data processsing")
parser.add_argument("--iter", type=int, default=3, help="number of iterations")
parser.add_argument("--lr", type=float, default=-1.0, help="initial learning rate")
parser.add_argument("--batch_size", type=int, default=100, help="(max) batch size")
parser.add_argument("--cuda", action='store_true', default=False, help="enable cuda")
parser.add_argument("--multi_proto", action='store_true', default=False, help="True: multi-prototype, False:single-prototype")
MAX_SENT_LEN = 1000
# Build the vocabulary.
def file_split(f, delim=' \t\n', bufsize=1024):
prev = ''
while True:
s = f.read(bufsize)
if not s:
break
tokens = re.split('['+delim+']{1,}', s)
if len(tokens) > 1:
yield prev + tokens[0]
prev = tokens[-1]
for x in tokens[1:-1]:
yield x
else:
prev += s
if prev:
yield prev
def build_vocab(args):
vocab = Counter()
word_count = 0
for word in file_split(open(args.train)):
vocab[word] += 1
word_count += 1
if word_count % 10000 == 0:
sys.stdout.write('%d\r' % len(vocab))
freq = {k:v for k,v in vocab.items() if v >= args.min_count}
word_count = sum([freq[k] for k in freq])
word_list = sorted(freq, key=freq.get, reverse=True)
word2idx = {}
for i,w in enumerate(word_list):
word2idx[w] = i
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
return word2idx, word_list, freq
class CSV(nn.Module):
def __init__(self, args):
super(CSV, self).__init__()
self.global_embs = nn.Embedding(args.vocab_size+1, args.size, padding_idx=args.vocab_size, sparse=True)
self.sense_embs = nn.Embedding(args.vocab_size*5, args.size, sparse=True)
self.ctx_weight = torch.nn.Parameter(torch.ones(2*args.window, args.size))
self.word2sense = [ [i] for i in range(args.vocab_size) ]
'''
word2sense = np.zeros((args.vocab_size, 5), dtype='int32')
for i in range(args.vocab_size):
word2sense[i, 0] = i
self.word2sense = torch.nn.Parameter(torch.from_numpy(word2sense).int())
self.word_sense_cnts = torch.nn.Parameter(torch.ones((args.vocab_size,)).int())
'''
self.global_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.sense_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.n_senses = args.vocab_size
self.sense_capacity = args.vocab_size*5
self.batch_size = args.batch_size
self.size = args.size
self.window = args.window
self.negative = args.negative
self.pad_idx = args.vocab_size
def get_context_feats(self, ctx_type_indices):
ctx_type_embs = self.global_embs(ctx_type_indices)
return torch.sum(ctx_type_embs * self.ctx_weight, 1).cpu().data.numpy()
def get_possible_sense_embs(self, type_indices, cuda=True):
sense_indices = []
sense2idx = {}
for type_id in type_indices:
for s_id in self.word2sense[type_id]:
if s_id not in sense2idx:
sense2idx[s_id] = len(sense_indices)
sense_indices.append( s_id )
sense_indices = np.array(sense_indices)
if cuda:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices).cuda()))
return sense2idx, sense_embs.cpu().data.numpy()
else:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices)))
return sense2idx, sense_embs.data.numpy()
def forward(self, data):
ctx_type_indices = data[:, 0:2*self.window]
pos_sense_idx = data[:, 2*self.window+1]
neg_sense_indices = data[:, 2*self.window+2:2*self.window+2+self.negative]
neg_mask = data[:, 2*self.window+2+self.negative:].float()
ctx_type_embs = self.global_embs(ctx_type_indices)
pos_sense_embs = self.sense_embs(pos_sense_idx)
neg_sense_embs = self.sense_embs(neg_sense_indices)
ctx_feats = torch.sum(ctx_type_embs * self.ctx_weight, 1, keepdim=True)
# Neg Log Likelihood
pos_ips = torch.sum(ctx_feats[:,0,:] * pos_sense_embs, 1)
pos_loss = torch.sum( -F.logsigmoid(torch.clamp(pos_ips,max=10,min=-10)))
neg_ips = torch.bmm(neg_sense_embs, ctx_feats.permute(0,2,1))[:,:,0]
neg_loss = torch.sum( -F.logsigmoid(torch.clamp(-neg_ips,max=10,min=-10)) * neg_mask )
return pos_loss + neg_loss
# Initialize model.
def init_net(args):
if args.lr == -1.0:
vars(args)['lr'] = 0.05
return CSV(args)
def save_model(filename, model, args, word2idx):
torch.save({
'word2idx':word2idx,
'args':args,
#'word2sense': model.word2sense,
'n_senses': model.n_senses,
'params': model.state_dict()
}, filename)
def load_model(filename):
checkpoint = torch.load(filename)
word2idx = checkpoint['word2idx']
args = checkpoint['args']
model = CSV(args)
if args.cuda:
model.cuda()
model.global_embs.weight.data = checkpoint['params']['global_embs.weight']
model.sense_embs.weight.data = checkpoint['params']['sense_embs.weight']
model.ctx_weight.data = checkpoint['params']['ctx_weight']
model.word2sense = checkpoint['word2sense']
#model.word2sense.data = checkpoint['params']['word2sense']
#model.word_sense_cnts.data = checkpoint['params']['word_sense_cnts']
model.n_senses = checkpoint['n_senses']
return model, word2idx
# Training
def train_process_sent_producer(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args):
n_proc = 1 if args.stage == 2 else args.processes
N = 1 if args.stage == 2 else args.iter
neg = 0 if args.stage == 2 else args.negative
if args.negative > 0:
table_ptr_val = data_producer.init_unigram_table(word_list, freq, args.train_words)
train_file = open(args.train)
file_pos = args.file_size * p_id // n_proc
train_file.seek(file_pos, 0)
while True:
try:
train_file.read(1)
except UnicodeDecodeError:
file_pos -= 1
train_file.seek(file_pos, 0)
else:
train_file.seek(file_pos, 0)
break
batch_count = 0
batch_placeholder = np.zeros((args.batch_size, 2*args.window+2+2*neg), 'int64')
for it in range(N):
train_file.seek(file_pos, 0)
last_word_cnt = 0
word_cnt = 0
sentence = []
prev = ''
eof = False
while True:
if eof or train_file.tell() > file_pos + args.file_size / n_proc:
break
while True:
s = train_file.read(1)
if not s:
eof = True
break
elif s == ' ' or s == '\t':
if prev in word2idx:
sentence.append(prev)
prev = ''
if len(sentence) >= MAX_SENT_LEN:
break
elif s == '\n':
if prev in word2idx:
sentence.append(prev)
prev = ''
break
else:
prev += s
if len(sentence) > 0:
# subsampling
sent_id = []
if args.sample != 0:
sent_len = len(sentence)
i = 0
while i < sent_len:
word = sentence[i]
f = freq[word] / args.train_words
pb = (np.sqrt(f / args.sample) + 1) * args.sample / f;
if pb > np.random.random_sample():
sent_id.append( word2idx[word] )
i += 1
if len(sent_id) < 2:
word_cnt += len(sentence)
sentence.clear()
continue
next_random = (2**24) * np.random.randint(0, 2**24) + np.random.randint(0, 2**24)
chunk = data_producer.cbow_producer(sent_id, len(sent_id), table_ptr_val, args.window,
neg, args.vocab_size, args.batch_size, next_random)
chunk_pos = 0
while chunk_pos < chunk.shape[0]:
remain_space = args.batch_size - batch_count
remain_chunk = chunk.shape[0] - chunk_pos
if remain_chunk < remain_space:
take_from_chunk = remain_chunk
else:
take_from_chunk = remain_space
batch_placeholder[batch_count:batch_count+take_from_chunk, :] = chunk[chunk_pos:chunk_pos+take_from_chunk, :]
batch_count += take_from_chunk
if batch_count == args.batch_size:
data_queue.put(batch_placeholder)
batch_count = 0
chunk_pos += take_from_chunk
word_cnt += len(sentence)
if word_cnt - last_word_cnt > 10000:
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
last_word_cnt = word_cnt
sentence.clear()
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
print(p_id, it, file_pos, train_file.tell(), args.file_size)
if batch_count > 0:
data_queue.put(batch_placeholder[:batch_count,:])
data_queue.put(None)
print(p_id, file_pos, train_file.tell(), args.file_size)
def train_process(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
lr = args.lr
#optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
optimizer = optim.Adagrad(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
#n_iter = 1 if args.stage == 2 else args.iter
n_iter = args.iter
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
# lr anneal & output
if word_count_actual.value - prev_word_cnt > 10000:
#if args.lr_anneal:
# lr = args.lr * (1 - word_count_actual.value / (n_iter * args.train_words))
# if lr < 0.0001 * args.lr:
# lr = 0.0001 * args.lr
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
#sys.stdout.write("\rAlpha: %0.8f, Progess: %0.2f, Words/sec: %f, word_cnt: %d" % (lr, word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.stage == 1:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
elif args.stage == 3:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
#type_ids = chunk[:, 2*args.window+1:2*args.window+2+2*args.negative]
type_ids = chunk[:, 2*args.window+1:2*args.window+2+args.negative]
type_ids = np.reshape(type_ids, (type_ids.shape[0] * type_ids.shape[1]))
sense2idx, sense_embs = model.get_possible_sense_embs(type_ids.tolist())
# get type_idx from chunk, and do sense selection here.
context_feats = model.get_context_feats(data[:, :2*args.window])
chunk = data_producer.select_sense(chunk, context_feats, sense2idx, sense_embs,
model.word2sense, chunk.shape[0], args.size, args.window, args.negative)
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
t.join()
def train_process_stage2(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
sense_embs = model.sense_embs.weight.data.numpy()
counter_list = np.zeros((model.sense_capacity), dtype='float32')
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
n_iter = 1
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
if word_count_actual.value - prev_word_cnt > 10000:
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
context_feats = model.get_context_feats(data[:, :2*args.window])
# update sense_embs
create_cnt = data_producer.create_n_update_sense(chunk[:, 2*args.window+1], context_feats, sense_embs, model.word2sense, counter_list, chunk.shape[0], args.size, args.delta, model.n_senses)
model.n_senses += create_cnt
#if model.n_senses + args.batch_size > model.sense_capacity:
# new_capacity = model.sense_capacity * 3 // 2
# counter_list = np.concatenate( (counter_list, np.ones((new_capacity - model.sense_capacity),dtype='float32')), axis=0)
# zero = np.zeros((new_capacity - model.sense_capacity, args.size), 'float32')
# sense_embs = np.concatenate((sense_embs, zero), 0)
# model.sense_capacity = new_capacity
# print("\nexapnded sense_embs: %d" % model.n_senses)
t.join()
sense_embs[:model.n_senses, :] = sense_embs[:model.n_senses, :] / counter_list[:model.n_senses, None]
if __name__ == '__main__':
set_start_method('forkserver')
args = parser.parse_args()
print("Starting training using file %s" % args.train)
train_file = open(args.train)
train_file.seek(0, 2)
vars(args)['file_size'] = train_file.tell()
word_count_actual = mp.Value('L', 0)
if args.vocab == '':
word2idx, word_list, freq = build_vocab(args)
else:
with open(args.vocab, 'rb') as f:
word2idx, word_list, freq, pos2idx, dep2id = pickle.load(f)
word_count = sum([freq[k] for k in freq])
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
model = init_net(args)
model.share_memory()
if args.cuda:
model.cuda()
# stage 1, learn robust context representation.
vars(args)['stage'] = 1
print("Stage 1")
vars(args)['lr_anneal'] = True
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
del processes
print("\nStage 1, ", time.monotonic() - args.t_start, " secs ", word_count_actual.value)
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage1.pth.tar'
save_model(filename, model, args, word2idx)
if args.multi_proto:
# stage 2, create new sense in a non-parametric way.
# Freeze model paramters except sense_embs, and use only 1 process to prevent race condition
old_batch_size = vars(args)['batch_size']
model.global_embs.requires_grad = False
model.ctx_weight.requires_grad = False
model.sense_embs = model.sense_embs.cpu()
vars(args)['stage'] = 2
vars(args)['batch_size'] = 5000
print("\nStage 2")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
train_process_stage2(0, word_count_actual, word2idx, word_list, freq, args, model)
if args.cuda:
model.cuda()
print("\nStage 2, ", time.monotonic() - args.t_start, " secs")
print("Current # of senses: %d" % model.n_senses)
pdb.set_trace()
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage2.pth.tar'
save_model(filename, model, args, word2idx)
# stage 3, no more sense creation.
vars(args)['lr'] = args.lr * 0.01
vars(args)['batch_size'] = old_batch_size
model.global_embs.requires_grad = True
model.ctx_weight.requires_grad = True
vars(args)['stage'] = 3
print("\nBegin stage 3")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
print("\nStage 3, ", time.monotonic() - args.t_start, " secs")
# save model
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage3.pth.tar'
save_model(filename, model, args, word2idx)
print("")
| en | 0.419023 | #!/usr/bin/env python3 # Build the vocabulary. word2sense = np.zeros((args.vocab_size, 5), dtype='int32') for i in range(args.vocab_size): word2sense[i, 0] = i self.word2sense = torch.nn.Parameter(torch.from_numpy(word2sense).int()) self.word_sense_cnts = torch.nn.Parameter(torch.ones((args.vocab_size,)).int()) # Neg Log Likelihood # Initialize model. #'word2sense': model.word2sense, #model.word2sense.data = checkpoint['params']['word2sense'] #model.word_sense_cnts.data = checkpoint['params']['word_sense_cnts'] # Training # subsampling #optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr) #n_iter = 1 if args.stage == 2 else args.iter # get from data_queue and feed to model # lr anneal & output #if args.lr_anneal: # lr = args.lr * (1 - word_count_actual.value / (n_iter * args.train_words)) # if lr < 0.0001 * args.lr: # lr = 0.0001 * args.lr # for param_group in optimizer.param_groups: # param_group['lr'] = lr #sys.stdout.write("\rAlpha: %0.8f, Progess: %0.2f, Words/sec: %f, word_cnt: %d" % (lr, word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value)) #type_ids = chunk[:, 2*args.window+1:2*args.window+2+2*args.negative] # get type_idx from chunk, and do sense selection here. # get from data_queue and feed to model # update sense_embs #if model.n_senses + args.batch_size > model.sense_capacity: # new_capacity = model.sense_capacity * 3 // 2 # counter_list = np.concatenate( (counter_list, np.ones((new_capacity - model.sense_capacity),dtype='float32')), axis=0) # zero = np.zeros((new_capacity - model.sense_capacity, args.size), 'float32') # sense_embs = np.concatenate((sense_embs, zero), 0) # model.sense_capacity = new_capacity # print("\nexapnded sense_embs: %d" % model.n_senses) # stage 1, learn robust context representation. # stage 2, create new sense in a non-parametric way. # Freeze model paramters except sense_embs, and use only 1 process to prevent race condition # of senses: %d" % model.n_senses) # stage 3, no more sense creation. # save model | 2.453386 | 2 |
Ogrenciler/Varol/buyuksayi.py | ProEgitim/Python-Dersleri-BEM | 1 | 8340 | sayi1 = int(input("1. Sayı: "))
sayi2 = int(input("2. Sayı: "))
sayi3 = int(input("3. Sayı: "))
sayi4 = int(input("4. Sayı: "))
sayi5 = int(input("5. Sayı: "))
sayilar=[];
sayilar.append(sayi1)
sayilar.append(sayi2)
sayilar.append(sayi3)
sayilar.append(sayi4)
sayilar.append(sayi5)
sayilar.sort()
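# After sorting in ascending order, sayilar[-1] is the largest of the five numbers
# entered above (the prompts and the output message are in Turkish).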
print("En büyük sayimiz..",sayilar[-1])
| sayi1 = int(input("1. Sayı: "))
sayi2 = int(input("2. Sayı: "))
sayi3 = int(input("3. Sayı: "))
sayi4 = int(input("4. Sayı: "))
sayi5 = int(input("5. Sayı: "))
sayilar=[];
sayilar.append(sayi1)
sayilar.append(sayi2)
sayilar.append(sayi3)
sayilar.append(sayi4)
sayilar.append(sayi5)
sayilar.sort()
print("En büyük sayimiz..",sayilar[-1])
| none | 1 | 3.649919 | 4 |
|
baselines/deepq/build_graph_mfec.py | MouseHu/emdqn | 0 | 8341 | <gh_stars>0
"""Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to chose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
    update epsilon to a new value; if negative, no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
    importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example, for Atari
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
import numpy as np
def build_act_mf(make_obs_ph, q_func, z_noise, num_actions, scope="deepq", reuse=None):
with tf.variable_scope(scope, reuse=reuse):
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
q, q_deterministic, v_mean, v_logvar, z_mean, z_logvar, recon_obs = q_func(observations_ph.get(), z_noise,
num_actions,
scope="q_func",
reuse=tf.AUTO_REUSE)
act = U.function(inputs=[observations_ph,z_noise],
outputs=[z_mean, z_logvar])
return act
def build_train_mf(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, scope="mfec",
alpha=1.0, beta=1.0, theta=1.0, latent_dim=32, ib=True, reuse=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
` See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
` See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
` See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
act_noise = tf.placeholder(tf.float32, [None, latent_dim], name="act_noise")
act_f = build_act_mf(make_obs_ph, q_func, act_noise, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
# EMDQN
obs_vae_input = U.ensure_tf_input(make_obs_ph("obs_vae"))
z_noise_vae = tf.placeholder(tf.float32, [None, latent_dim], name="z_noise_vae")
inputs = [obs_vae_input,z_noise_vae]
if ib:
qec_input = tf.placeholder(tf.float32, [None], name='qec')
inputs.append(qec_input)
outputs = []
q_vae, q_deterministic_vae, v_mean_vae, v_logvar_vae, z_mean_vae, z_logvar_vae, recon_obs = q_func(obs_vae_input.get(),
z_noise_vae, num_actions,
scope="q_func",
reuse=True)
q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))
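        # Per-dimension KL divergence of N(z_mean, exp(z_logvar)) from N(0, I),
        # up to a factor of 1/2: mu^2 + sigma^2 - log(sigma^2) - 1.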
encoder_loss = -1 + z_mean_vae ** 2 + tf.exp(z_logvar_vae) - z_logvar_vae
total_loss = tf.reduce_mean(beta * encoder_loss)
decoder_loss = tf.keras.losses.binary_crossentropy(tf.reshape(recon_obs, [-1]), tf.reshape(
tf.dtypes.cast(obs_vae_input._placeholder, tf.float32), [-1]))
print("here", z_mean_vae.shape, z_logvar_vae.shape, encoder_loss.shape, decoder_loss.shape)
vae_loss = beta * encoder_loss + theta * decoder_loss
outputs.append(encoder_loss)
outputs.append(decoder_loss)
outputs.append(vae_loss)
total_loss += tf.reduce_mean(theta * decoder_loss)
if ib:
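            # Gaussian negative log-likelihood of the episodic (QEC) return under
            # N(v_mean, exp(v_logvar)), up to additive constants.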
ib_loss = (v_mean_vae - tf.stop_gradient(tf.expand_dims(qec_input, 1))) ** 2 / tf.exp(
v_logvar_vae) + v_logvar_vae
print("here2", v_mean_vae.shape, tf.expand_dims(qec_input, 1).shape, v_logvar_vae.shape, ib_loss.shape)
total_ib_loss = alpha * ib_loss + beta * encoder_loss
outputs.append(total_ib_loss)
total_loss += tf.reduce_mean(alpha * ib_loss)
if grad_norm_clipping is not None:
optimize_expr = U.minimize_and_clip(optimizer,
total_loss,
var_list=q_func_vars,
clip_val=grad_norm_clipping)
else:
optimize_expr = optimizer.minimize(total_loss, var_list=q_func_vars)
# Create callable functions
# EMDQN
total_loss_summary = tf.summary.scalar("total loss", total_loss)
z_var_summary = tf.summary.scalar("z_var", tf.reduce_mean(tf.exp(z_logvar_vae)))
encoder_loss_summary = tf.summary.scalar("encoder loss", tf.reduce_mean(encoder_loss))
decoder_loss_summary = tf.summary.scalar("decoder loss", tf.reduce_mean(decoder_loss))
summaries = [total_loss_summary, z_var_summary, encoder_loss_summary, decoder_loss_summary]
if ib:
ib_loss_summary = tf.summary.scalar("ib loss", tf.reduce_mean(ib_loss))
total_ib_loss_summary = tf.summary.scalar("total ib loss", tf.reduce_mean(total_ib_loss))
summaries.append(ib_loss_summary)
summaries.append(total_ib_loss_summary)
summary = tf.summary.merge(summaries)
outputs.append(summary)
train = U.function(
inputs=inputs,
outputs=[total_loss,summary],
updates=[optimize_expr]
)
return act_f, train
| """Deep Q learning graph
The functions in this file can be used to create the following functions:
======= act ========
Function to choose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
    update epsilon to a new value; if negative, no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
    importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example, for Atari
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
import numpy as np
def build_act_mf(make_obs_ph, q_func, z_noise, num_actions, scope="deepq", reuse=None):
with tf.variable_scope(scope, reuse=reuse):
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
q, q_deterministic, v_mean, v_logvar, z_mean, z_logvar, recon_obs = q_func(observations_ph.get(), z_noise,
num_actions,
scope="q_func",
reuse=tf.AUTO_REUSE)
act = U.function(inputs=[observations_ph,z_noise],
outputs=[z_mean, z_logvar])
return act
def build_train_mf(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, scope="mfec",
alpha=1.0, beta=1.0, theta=1.0, latent_dim=32, ib=True, reuse=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
` See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
` See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
` See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
act_noise = tf.placeholder(tf.float32, [None, latent_dim], name="act_noise")
act_f = build_act_mf(make_obs_ph, q_func, act_noise, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
# EMDQN
obs_vae_input = U.ensure_tf_input(make_obs_ph("obs_vae"))
z_noise_vae = tf.placeholder(tf.float32, [None, latent_dim], name="z_noise_vae")
inputs = [obs_vae_input,z_noise_vae]
if ib:
qec_input = tf.placeholder(tf.float32, [None], name='qec')
inputs.append(qec_input)
outputs = []
q_vae, q_deterministic_vae, v_mean_vae, v_logvar_vae, z_mean_vae, z_logvar_vae, recon_obs = q_func(obs_vae_input.get(),
z_noise_vae, num_actions,
scope="q_func",
reuse=True)
q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))
encoder_loss = -1 + z_mean_vae ** 2 + tf.exp(z_logvar_vae) - z_logvar_vae
total_loss = tf.reduce_mean(beta * encoder_loss)
decoder_loss = tf.keras.losses.binary_crossentropy(tf.reshape(recon_obs, [-1]), tf.reshape(
tf.dtypes.cast(obs_vae_input._placeholder, tf.float32), [-1]))
print("here", z_mean_vae.shape, z_logvar_vae.shape, encoder_loss.shape, decoder_loss.shape)
vae_loss = beta * encoder_loss + theta * decoder_loss
outputs.append(encoder_loss)
outputs.append(decoder_loss)
outputs.append(vae_loss)
total_loss += tf.reduce_mean(theta * decoder_loss)
if ib:
ib_loss = (v_mean_vae - tf.stop_gradient(tf.expand_dims(qec_input, 1))) ** 2 / tf.exp(
v_logvar_vae) + v_logvar_vae
print("here2", v_mean_vae.shape, tf.expand_dims(qec_input, 1).shape, v_logvar_vae.shape, ib_loss.shape)
total_ib_loss = alpha * ib_loss + beta * encoder_loss
outputs.append(total_ib_loss)
total_loss += tf.reduce_mean(alpha * ib_loss)
if grad_norm_clipping is not None:
optimize_expr = U.minimize_and_clip(optimizer,
total_loss,
var_list=q_func_vars,
clip_val=grad_norm_clipping)
else:
optimize_expr = optimizer.minimize(total_loss, var_list=q_func_vars)
# Create callable functions
# EMDQN
total_loss_summary = tf.summary.scalar("total loss", total_loss)
z_var_summary = tf.summary.scalar("z_var", tf.reduce_mean(tf.exp(z_logvar_vae)))
encoder_loss_summary = tf.summary.scalar("encoder loss", tf.reduce_mean(encoder_loss))
decoder_loss_summary = tf.summary.scalar("decoder loss", tf.reduce_mean(decoder_loss))
summaries = [total_loss_summary, z_var_summary, encoder_loss_summary, decoder_loss_summary]
if ib:
ib_loss_summary = tf.summary.scalar("ib loss", tf.reduce_mean(ib_loss))
total_ib_loss_summary = tf.summary.scalar("total ib loss", tf.reduce_mean(total_ib_loss))
summaries.append(ib_loss_summary)
summaries.append(total_ib_loss_summary)
summary = tf.summary.merge(summaries)
outputs.append(summary)
train = U.function(
inputs=inputs,
outputs=[total_loss,summary],
updates=[optimize_expr]
)
return act_f, train | en | 0.760463 | Deep Q learning graph The functions in this file can are used to create the following functions: ======= act ======== Function to chose an action given an observation Parameters ---------- observation: object Observation that can be feed into the output of make_obs_ph stochastic: bool if set to False all the actions are always deterministic (default False) update_eps_ph: float update epsilon a new value, if negative not update happens (default: no update) Returns ------- Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= train ======= Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error: td_error = Q(s,a) - (r + gamma * max_a' Q(s', a')) loss = huber_loss[td_error] Parameters ---------- obs_t: object a batch of observations action: np.array actions that were selected upon seeing obs_t. dtype must be int32 and shape must be (batch_size,) reward: np.array immediate reward attained after executing those actions dtype must be float32 and shape must be (batch_size,) obs_tp1: object observations that followed obs_t done: np.array 1 if obs_t was the last observation in the episode and 0 otherwise obs_tp1 gets ignored, but must be of the valid shape. dtype must be float32 and shape must be (batch_size,) weight: np.array imporance weights for every element of the batch (gradient is multiplied by the importance weight) dtype must be float32 and shape must be (batch_size,) Returns ------- td_error: np.array a list of differences between Q(s,a) and the target in Bellman's equation. dtype is float32 and shape is (batch_size,) ======= update_target ======== copy the parameters from optimized Q function to the target Q function. In Q learning we actually optimize the following error: Q(s,a) - (r + gamma * max_a' Q'(s', a')) Where Q' is lagging behind Q to stablize the learning. For example for Atari Q' is set to Q once every 10000 updates training steps. Creates the train function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that takes a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions reuse: bool whether or not to reuse the graph variables optimizer: tf.train.Optimizer optimizer to use for the Q-learning objective. grad_norm_clipping: float or None clip gradient norms to this value. If None no clipping is performed. gamma: float discount rate. double_q: bool if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a good idea to keep it enabled. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. ` See the top of the file for details. update_target: () -> () copy the parameters from optimized Q function to the target Q function. 
` See the top of the file for details. debug: {str: function} a bunch of functions to print debug data like q_values. # set up placeholders # EMDQN # Create callable functions # EMDQN | 3.10698 | 3 |
tests/test_prior.py | frodre/LMR | 17 | 8342 | <filename>tests/test_prior.py
import sys
sys.path.append('../')
import LMR_config as cfg
import LMR_prior
import numpy as np
import pytest
def test_prior_seed():
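    # Building two priors with the same RNG seed should yield identical ensembles.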
cfg_obj = cfg.Config(**{'core':{'seed': 2}})
prior_cfg = cfg_obj.prior
prior_source = '20cr'
datadir_prior = 'data'
datafile_prior = '[vardef_template]_gridded_dat.nc'
state_variables = {'air': 'anom'}
state_kind = 'anom'
X = LMR_prior.prior_assignment(prior_source)
X.prior_datadir = datadir_prior
X.prior_datafile = datafile_prior
X.statevars = state_variables
X.Nens = 1
X.detrend = False
X.kind = state_kind
X.avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X.populate_ensemble(prior_source, prior_cfg)
X2 = LMR_prior.prior_assignment(prior_source)
X2.prior_datadir = datadir_prior
X2.prior_datafile = datafile_prior
X2.statevars = state_variables
X2.Nens = 1
X2.detrend = False
X2.kind = state_kind
X2.avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X2.populate_ensemble(prior_source, prior_cfg)
np.testing.assert_equal(X2.ens, X.ens)
def test_prior_use_full_prior():
cfg_obj = cfg.Config(**{'core': {'seed': None}})
prior_cfg = cfg_obj.prior
prior_source = '20cr'
datadir_prior = 'data'
datafile_prior = '[vardef_template]_gridded_dat.nc'
state_variables = {'air': 'anom'}
state_kind = 'anom'
avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X = LMR_prior.prior_assignment(prior_source)
X.prior_datadir = datadir_prior
X.prior_datafile = datafile_prior
X.statevars = state_variables
X.Nens = None
X.detrend = False
X.kind = state_kind
X.avgInterval = avgInterval
X.populate_ensemble(prior_source, prior_cfg)
X2 = LMR_prior.prior_assignment(prior_source)
X2.prior_datadir = datadir_prior
X2.prior_datafile = datafile_prior
X2.statevars = state_variables
X2.Nens = None
X2.detrend = False
X2.kind = state_kind
X2.avgInterval = avgInterval
X2.read_prior()
# Transform full prior into ensemble-like shape
prior_vals = X2.prior_dict['air']['value']
prior_vals = prior_vals.reshape(prior_vals.shape[0], -1)
prior_vals = prior_vals.T
np.testing.assert_equal(X.ens, prior_vals)
| <filename>tests/test_prior.py
import sys
sys.path.append('../')
import LMR_config as cfg
import LMR_prior
import numpy as np
import pytest
def test_prior_seed():
cfg_obj = cfg.Config(**{'core':{'seed': 2}})
prior_cfg = cfg_obj.prior
prior_source = '20cr'
datadir_prior = 'data'
datafile_prior = '[vardef_template]_gridded_dat.nc'
state_variables = {'air': 'anom'}
state_kind = 'anom'
X = LMR_prior.prior_assignment(prior_source)
X.prior_datadir = datadir_prior
X.prior_datafile = datafile_prior
X.statevars = state_variables
X.Nens = 1
X.detrend = False
X.kind = state_kind
X.avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X.populate_ensemble(prior_source, prior_cfg)
X2 = LMR_prior.prior_assignment(prior_source)
X2.prior_datadir = datadir_prior
X2.prior_datafile = datafile_prior
X2.statevars = state_variables
X2.Nens = 1
X2.detrend = False
X2.kind = state_kind
X2.avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X2.populate_ensemble(prior_source, prior_cfg)
np.testing.assert_equal(X2.ens, X.ens)
def test_prior_use_full_prior():
cfg_obj = cfg.Config(**{'core': {'seed': None}})
prior_cfg = cfg_obj.prior
prior_source = '20cr'
datadir_prior = 'data'
datafile_prior = '[vardef_template]_gridded_dat.nc'
state_variables = {'air': 'anom'}
state_kind = 'anom'
avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X = LMR_prior.prior_assignment(prior_source)
X.prior_datadir = datadir_prior
X.prior_datafile = datafile_prior
X.statevars = state_variables
X.Nens = None
X.detrend = False
X.kind = state_kind
X.avgInterval = avgInterval
X.populate_ensemble(prior_source, prior_cfg)
X2 = LMR_prior.prior_assignment(prior_source)
X2.prior_datadir = datadir_prior
X2.prior_datafile = datafile_prior
X2.statevars = state_variables
X2.Nens = None
X2.detrend = False
X2.kind = state_kind
X2.avgInterval = avgInterval
X2.read_prior()
# Transform full prior into ensemble-like shape
prior_vals = X2.prior_dict['air']['value']
prior_vals = prior_vals.reshape(prior_vals.shape[0], -1)
prior_vals = prior_vals.T
np.testing.assert_equal(X.ens, prior_vals)
| en | 0.926232 | # Transform full prior into ensemble-like shape | 2.155715 | 2 |
src/salgan_dhf1k/train_bce.py | juanjo3ns/SalGAN2 | 0 | 8343 | <gh_stars>0
import os
from dataloader.datasetDHF1K import DHF1K
from torch.utils.data import DataLoader
from utils.salgan_utils import save_model, get_lr_optimizer
from utils.sendTelegram import send
from utils.printer import param_print
from utils.salgan_generator import create_model, add_bn
from evaluation.fast_evaluation import compute_metrics
import numpy as np
import torch
from torch.nn import AvgPool2d
from torch.nn.modules.loss import BCELoss
import torch.backends.cudnn as cudnn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from time import time
from IPython import embed
from tensorboard_logger import configure, log_value, log_histogram
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
def add_layer_weights(vgg_weights):
# Mean of RGB weights of first layer with size [64,1,3,3]
layer1 = vgg_weights['0.weight']
mean_rgb = layer1.mean(dim=1,keepdim=True)
vgg_weights['0.weight'] = torch.cat([layer1.cuda(),mean_rgb.cuda()],1)
    # We could do this easily by accessing the weights through model[0].weight and changing dimension 1, but as we
    # already have the 4th channel we'd be taking the mean of all of the channels, initializing it in the wrong way.
return vgg_weights
def train_eval(mode, model, optimizer, dataloader):
if mode == TRAIN:
N = len(ds_train)/batch_size
model.train()
else:
N = len(ds_validate)/batch_size
model.eval()
total_loss = []
    #iterate epoch...
for i, X in enumerate(dataloader[mode]):
inputs = X[0].cuda()
        # normalize saliency map values to [0, 1]
gt_maps = X[1].cuda()/255
        # embed()  # leftover IPython debugging hook, disabled so training is not interrupted
predictions = model.forward(inputs).squeeze()
# reduce size for loss
reduce_size = AvgPool2d((4,4))
pred_ = reduce_size(predictions)
gt_maps_ = reduce_size(gt_maps)
pred_ = pred_.view(pred_.size()[0], -1)
gt_maps_ = gt_maps_.view(gt_maps_.size()[0], -1)
loss = bce_loss(pred_, gt_maps_)
# make actual step update
if mode==TRAIN:
# compute gradients
loss.backward()
# step optimizer
optimizer.step()
# reset grads for next step
optimizer.zero_grad()
print("\t{}/{} loss:{}".format(i, int(N), loss.item()), end="\r")
total_loss.append(loss.item())
total_loss=np.mean(total_loss)
return total_loss
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--path_out", default='sal_dhf1k_adamdepthcoordaugm2_frombestsaldepth',
type=str,
help="""set output path for the trained model""")
parser.add_argument("--batch_size", default=12,
type=int,
help="""Set batch size""")
parser.add_argument("--n_epochs", default=10, type=int,
help="""Set total number of epochs""")
parser.add_argument("--depth", default=False, type=bool,
help="""Enable 4th channel with depth""")
parser.add_argument("--augment", default=False, type=bool,
help="""Enable data augmentation""")
parser.add_argument("--coord", default=False, type=bool,
help="""Enable coordconv""")
parser.add_argument("--flow", default=False, type=bool,
help="""Enable opticalflow""")
parser.add_argument("--lr", type=float, default=0.00001,
help="""Learning rate for training""")
parser.add_argument("--patience", type=int, default=3,
help="""Patience for learning rate scheduler (default 10)""")
args = parser.parse_args()
# set output path ==========================================================
path_out = '../trained_models/batch12_/' + args.path_out
if not os.path.exists(path_out):
# create output path
os.makedirs(path_out)
# create output for models
path_models = os.path.join(path_out, 'models')
if not os.path.exists(path_models):
os.makedirs(path_models)
# tensorboard
configure("{}".format(path_out), flush_secs=5)
# data =====================================================================
batch_size = args.batch_size
n_epochs = args.n_epochs
lr = args.lr
DEPTH = args.depth
AUGMENT = args.augment
COORD = args.coord
FLOW = args.flow
# Datasets for DHF1K
ds_train = DHF1K(mode=TRAIN, transformation=True, depth=DEPTH, d_augm=AUGMENT, coord=COORD)
ds_validate = DHF1K(mode=VAL, transformation=False, depth=DEPTH, d_augm=False, coord=COORD)
# Dataloaders
dataloader = {
TRAIN: DataLoader(ds_train, batch_size=batch_size,
shuffle=True, num_workers=2),
VAL: DataLoader(ds_validate, batch_size=batch_size,
shuffle=False, num_workers=2)
}
# POSSIBILITY OF CHOOSING GPU
torch.cuda.set_device(1)
# MODEL INITIALIZATION
print("Init model...")
vgg_weights = torch.load('../trained_models/salgan_baseline.pt')['state_dict']
model = create_model(3)
# if DEPTH and COORD:
# model = create_model(6)
# for i in range(0,3):
# vgg_weights = add_layer_weights(vgg_weights)
# elif DEPTH:
# model = create_model(4)
# add_layer_weights(vgg_weights)
# elif COORD:
# model = create_model(5)
# for i in range(0,2):
# vgg_weights = add_layer_weights(vgg_weights)
# else: model = create_model(3)
# Instead of adding manually the layer of new weights, we could use strict=False
model.load_state_dict(vgg_weights)
# Add batch normalization to current model if needed
model = add_bn(model)
model.train()
model.cuda()
cudnn.benchmark = True
# NOT WORKING UNMOUNTED DISK
# If we have the two GPU's available we are going to use both
# if torch.cuda.device_count() > 1:
# print("Using ", torch.cuda.device_count(), "GPUs!")
# model = torch.nn.DataParallel(model)
# LOSS FUNCTION
bce_loss = BCELoss()
# FINE-TUNE WHOLE NETWORK OR JUST DECODER => uncomment / or different lr for each part
# decoder_parameters = []
# base_params = []
# for i, (a, p) in enumerate(model.named_parameters()):
# embed()
# if i>25:
# # print(i, a, p.shape)
# decoder_parameters.append(p)
# else:
# base_params.append(p)
# If you wanna train just the decoder put this
# p.requires_grad = False
# ADAM OPTIMIZER
optimizer = Adam(model.parameters(),
lr = lr,
weight_decay=0.000001)
# STOCHASTIC GRADIENT DESCENT OPTIMIZER
# optimizer = SGD(model.parameters(),
# lr = 0.00001,
# momentum=0.9,
# weight_decay=0.00001,
# nesterov=True)
# NUMBER OF TOTAL PARAMETERS
# pytorch_total_params = sum(p.numel() for p in model.parameters())
# NUMBER OF TRAINABLE PARAMETERS
trainable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Trainable parameters: ", trainable_parameters)
send("Trainable parameters: " + str(trainable_parameters))
send("Experiment: " + args.path_out)
# PRINT TABLE OF PARAMETERS
param_print([path_out,"",DEPTH,AUGMENT,COORD,FLOW,batch_size,lr,n_epochs, trainable_parameters])
# set learning rate scheduler
# ReduceLROnPlateau(
# optimizer,
    # mode (str): 'min' reduces the lr when the monitored metric stops decreasing, 'max' does the opposite,
    # factor (float): multiplicative factor by which the lr is reduced,
    # patience (int): number of epochs without improvement after which the lr is reduced,
# verbose (bool),
# )
# scheduler = ReduceLROnPlateau(optimizer,
# 'min',
# patience=args.patience,
# verbose=True)
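    # Active scheduler: StepLR multiplies the lr by gamma=0.1 every 3 epochs.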
scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
best_loss=9999999
# main loop training =======================================================
for id_epoch in range(n_epochs):
for mode in [VAL, TRAIN]:
# select dataloader
data_iterator = dataloader[mode]
#
# # saliency metrics
# if mode ==VAL:
# print("Evaluating metrics....")
# # only do 100 images from validation
# metrics = compute_metrics(model, 100, DEPTH, COORD)
#
# # log metric values
# for metric in metrics.keys():
# log_value("Metrics/{}".format(metric),
# metrics[metric], id_epoch)
#
# # get epoch loss
# print("--> {} epoch {}".format(mode, id_epoch))
epoch_loss = train_eval(mode, model, optimizer, dataloader)
lr = list(get_lr_optimizer(optimizer))[0]
print("-----------")
print("Done! {} epoch {} loss {} lr {}".format(mode, id_epoch, epoch_loss, lr))
send("{} epoch {}/{} loss {}".format(mode, id_epoch, n_epochs, epoch_loss))
print("\n")
# record loss
log_value("loss/{}".format(mode), epoch_loss, id_epoch)
log_value("lr/{}".format(mode), lr, id_epoch)
# for v in model.state_dict():
# log_histogram("Layer {}".format(v), model.state_dict()[v], id_epoch)
if (id_epoch%2)==0:
save_model(model, optimizer, id_epoch, path_out, name_model='{:03d}'.format(id_epoch))
# store model if val loss improves
if mode==VAL:
if best_loss > epoch_loss:
# update loss
best_loss = epoch_loss
save_model(model, optimizer, id_epoch, path_out, name_model='best')
# scheduler.step(epoch_loss)
scheduler.step()
| import os
from dataloader.datasetDHF1K import DHF1K
from torch.utils.data import DataLoader
from utils.salgan_utils import save_model, get_lr_optimizer
from utils.sendTelegram import send
from utils.printer import param_print
from utils.salgan_generator import create_model, add_bn
from evaluation.fast_evaluation import compute_metrics
import numpy as np
import torch
from torch.nn import AvgPool2d
from torch.nn.modules.loss import BCELoss
import torch.backends.cudnn as cudnn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from time import time
from IPython import embed
from tensorboard_logger import configure, log_value, log_histogram
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
def add_layer_weights(vgg_weights):
# Mean of RGB weights of first layer with size [64,1,3,3]
layer1 = vgg_weights['0.weight']
mean_rgb = layer1.mean(dim=1,keepdim=True)
vgg_weights['0.weight'] = torch.cat([layer1.cuda(),mean_rgb.cuda()],1)
    # We could do this easily by accessing the weights through model[0].weight and changing dimension 1, but as we
    # already have the 4th channel we'd be taking the mean of all of the channels, initializing it in the wrong way.
return vgg_weights
def train_eval(mode, model, optimizer, dataloader):
if mode == TRAIN:
N = len(ds_train)/batch_size
model.train()
else:
N = len(ds_validate)/batch_size
model.eval()
total_loss = []
    #iterate epoch...
for i, X in enumerate(dataloader[mode]):
inputs = X[0].cuda()
        # normalize saliency map values to [0, 1]
gt_maps = X[1].cuda()/255
        # embed()  # leftover IPython debugging hook, disabled so training is not interrupted
predictions = model.forward(inputs).squeeze()
# reduce size for loss
reduce_size = AvgPool2d((4,4))
pred_ = reduce_size(predictions)
gt_maps_ = reduce_size(gt_maps)
pred_ = pred_.view(pred_.size()[0], -1)
gt_maps_ = gt_maps_.view(gt_maps_.size()[0], -1)
loss = bce_loss(pred_, gt_maps_)
# make actual step update
if mode==TRAIN:
# compute gradients
loss.backward()
# step optimizer
optimizer.step()
# reset grads for next step
optimizer.zero_grad()
print("\t{}/{} loss:{}".format(i, int(N), loss.item()), end="\r")
total_loss.append(loss.item())
total_loss=np.mean(total_loss)
return total_loss
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--path_out", default='sal_dhf1k_adamdepthcoordaugm2_frombestsaldepth',
type=str,
help="""set output path for the trained model""")
parser.add_argument("--batch_size", default=12,
type=int,
help="""Set batch size""")
parser.add_argument("--n_epochs", default=10, type=int,
help="""Set total number of epochs""")
parser.add_argument("--depth", default=False, type=bool,
help="""Enable 4th channel with depth""")
parser.add_argument("--augment", default=False, type=bool,
help="""Enable data augmentation""")
parser.add_argument("--coord", default=False, type=bool,
help="""Enable coordconv""")
parser.add_argument("--flow", default=False, type=bool,
help="""Enable opticalflow""")
parser.add_argument("--lr", type=float, default=0.00001,
help="""Learning rate for training""")
parser.add_argument("--patience", type=int, default=3,
help="""Patience for learning rate scheduler (default 10)""")
args = parser.parse_args()
# set output path ==========================================================
path_out = '../trained_models/batch12_/' + args.path_out
if not os.path.exists(path_out):
# create output path
os.makedirs(path_out)
# create output for models
path_models = os.path.join(path_out, 'models')
if not os.path.exists(path_models):
os.makedirs(path_models)
# tensorboard
configure("{}".format(path_out), flush_secs=5)
# data =====================================================================
batch_size = args.batch_size
n_epochs = args.n_epochs
lr = args.lr
DEPTH = args.depth
AUGMENT = args.augment
COORD = args.coord
FLOW = args.flow
# Datasets for DHF1K
ds_train = DHF1K(mode=TRAIN, transformation=True, depth=DEPTH, d_augm=AUGMENT, coord=COORD)
ds_validate = DHF1K(mode=VAL, transformation=False, depth=DEPTH, d_augm=False, coord=COORD)
# Dataloaders
dataloader = {
TRAIN: DataLoader(ds_train, batch_size=batch_size,
shuffle=True, num_workers=2),
VAL: DataLoader(ds_validate, batch_size=batch_size,
shuffle=False, num_workers=2)
}
# POSSIBILITY OF CHOOSING GPU
torch.cuda.set_device(1)
# MODEL INITIALIZATION
print("Init model...")
vgg_weights = torch.load('../trained_models/salgan_baseline.pt')['state_dict']
model = create_model(3)
# if DEPTH and COORD:
# model = create_model(6)
# for i in range(0,3):
# vgg_weights = add_layer_weights(vgg_weights)
# elif DEPTH:
# model = create_model(4)
# add_layer_weights(vgg_weights)
# elif COORD:
# model = create_model(5)
# for i in range(0,2):
# vgg_weights = add_layer_weights(vgg_weights)
# else: model = create_model(3)
# Instead of adding manually the layer of new weights, we could use strict=False
model.load_state_dict(vgg_weights)
# Add batch normalization to current model if needed
model = add_bn(model)
model.train()
model.cuda()
cudnn.benchmark = True
# NOT WORKING UNMOUNTED DISK
# If we have the two GPU's available we are going to use both
# if torch.cuda.device_count() > 1:
# print("Using ", torch.cuda.device_count(), "GPUs!")
# model = torch.nn.DataParallel(model)
# LOSS FUNCTION
bce_loss = BCELoss()
# FINE-TUNE WHOLE NETWORK OR JUST DECODER => uncomment / or different lr for each part
# decoder_parameters = []
# base_params = []
# for i, (a, p) in enumerate(model.named_parameters()):
# embed()
# if i>25:
# # print(i, a, p.shape)
# decoder_parameters.append(p)
# else:
# base_params.append(p)
# If you wanna train just the decoder put this
# p.requires_grad = False
# ADAM OPTIMIZER
optimizer = Adam(model.parameters(),
lr = lr,
weight_decay=0.000001)
# STOCHASTIC GRADIENT DESCENT OPTIMIZER
# optimizer = SGD(model.parameters(),
# lr = 0.00001,
# momentum=0.9,
# weight_decay=0.00001,
# nesterov=True)
# NUMBER OF TOTAL PARAMETERS
# pytorch_total_params = sum(p.numel() for p in model.parameters())
# NUMBER OF TRAINABLE PARAMETERS
trainable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Trainable parameters: ", trainable_parameters)
send("Trainable parameters: " + str(trainable_parameters))
send("Experiment: " + args.path_out)
# PRINT TABLE OF PARAMETERS
param_print([path_out,"",DEPTH,AUGMENT,COORD,FLOW,batch_size,lr,n_epochs, trainable_parameters])
# set learning rate scheduler
# ReduceLROnPlateau(
# optimizer,
    # mode (str): 'min' reduces the lr when the monitored metric stops decreasing, 'max' does the opposite,
    # factor (float): multiplicative factor by which the lr is reduced,
    # patience (int): number of epochs without improvement after which the lr is reduced,
# verbose (bool),
# )
# scheduler = ReduceLROnPlateau(optimizer,
# 'min',
# patience=args.patience,
# verbose=True)
scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
best_loss=9999999
# main loop training =======================================================
for id_epoch in range(n_epochs):
for mode in [VAL, TRAIN]:
# select dataloader
data_iterator = dataloader[mode]
#
# # saliency metrics
# if mode ==VAL:
# print("Evaluating metrics....")
# # only do 100 images from validation
# metrics = compute_metrics(model, 100, DEPTH, COORD)
#
# # log metric values
# for metric in metrics.keys():
# log_value("Metrics/{}".format(metric),
# metrics[metric], id_epoch)
#
# # get epoch loss
# print("--> {} epoch {}".format(mode, id_epoch))
epoch_loss = train_eval(mode, model, optimizer, dataloader)
lr = list(get_lr_optimizer(optimizer))[0]
print("-----------")
print("Done! {} epoch {} loss {} lr {}".format(mode, id_epoch, epoch_loss, lr))
send("{} epoch {}/{} loss {}".format(mode, id_epoch, n_epochs, epoch_loss))
print("\n")
# record loss
log_value("loss/{}".format(mode), epoch_loss, id_epoch)
log_value("lr/{}".format(mode), lr, id_epoch)
# for v in model.state_dict():
# log_histogram("Layer {}".format(v), model.state_dict()[v], id_epoch)
if (id_epoch%2)==0:
save_model(model, optimizer, id_epoch, path_out, name_model='{:03d}'.format(id_epoch))
# store model if val loss improves
if mode==VAL:
if best_loss > epoch_loss:
# update loss
best_loss = epoch_loss
save_model(model, optimizer, id_epoch, path_out, name_model='best')
# scheduler.step(epoch_loss)
scheduler.step() | en | 0.505862 | # Mean of RGB weights of first layer with size [64,1,3,3] # We could do it easily accessing to the weights trought model[0].weight and change dimension 1, but as we # already have the 4th channel we'd be doing the mean of all of the channels, inicializing it in the wrong way. #iterate epoch... #iterate epoch... # noramlize saliency maps values between [0,1] # reduce size for loss # make actual step update # compute gradients # step optimizer # reset grads for next step set output path for the trained model Set batch size Set total number of epochs Enable 4th channel with depth Enable data augmentation Enable coordconv Enable opticalflow Learning rate for training Patience for learning rate scheduler (default 10) # set output path ========================================================== # create output path # create output for models # tensorboard # data ===================================================================== # Datasets for DHF1K # Dataloaders # POSSIBILITY OF CHOOSING GPU # MODEL INITIALIZATION # if DEPTH and COORD: # model = create_model(6) # for i in range(0,3): # vgg_weights = add_layer_weights(vgg_weights) # elif DEPTH: # model = create_model(4) # add_layer_weights(vgg_weights) # elif COORD: # model = create_model(5) # for i in range(0,2): # vgg_weights = add_layer_weights(vgg_weights) # else: model = create_model(3) # Instead of adding manually the layer of new weights, we could use strict=False # Add batch normalization to current model if needed # NOT WORKING UNMOUNTED DISK # If we have the two GPU's available we are going to use both # if torch.cuda.device_count() > 1: # print("Using ", torch.cuda.device_count(), "GPUs!") # model = torch.nn.DataParallel(model) # LOSS FUNCTION # FINE-TUNE WHOLE NETWORK OR JUST DECODER => uncomment / or different lr for each part # decoder_parameters = [] # base_params = [] # for i, (a, p) in enumerate(model.named_parameters()): # embed() # if i>25: # # print(i, a, p.shape) # decoder_parameters.append(p) # else: # base_params.append(p) # If you wanna train just the decoder put this # p.requires_grad = False # ADAM OPTIMIZER # STOCHASTIC GRADIENT DESCENT OPTIMIZER # optimizer = SGD(model.parameters(), # lr = 0.00001, # momentum=0.9, # weight_decay=0.00001, # nesterov=True) # NUMBER OF TOTAL PARAMETERS # pytorch_total_params = sum(p.numel() for p in model.parameters()) # NUMBER OF TRAINABLE PARAMETERS # PRINT TABLE OF PARAMETERS # set learning rate scheduler # ReduceLROnPlateau( # optimizer, # mode (str) 'min':lr es reduira quan la metrica no es redueixi mes, 'max' al contrari, # factor (float) factor de reduccio de la lr, # patience (int) num epochs sense millora a partir dels quals es redueix lr, # verbose (bool), # ) # scheduler = ReduceLROnPlateau(optimizer, # 'min', # patience=args.patience, # verbose=True) # main loop training ======================================================= # select dataloader # # # saliency metrics # if mode ==VAL: # print("Evaluating metrics....") # # only do 100 images from validation # metrics = compute_metrics(model, 100, DEPTH, COORD) # # # log metric values # for metric in metrics.keys(): # log_value("Metrics/{}".format(metric), # metrics[metric], id_epoch) # # # get epoch loss # print("--> {} epoch {}".format(mode, id_epoch)) # record loss # for v in model.state_dict(): # log_histogram("Layer {}".format(v), model.state_dict()[v], id_epoch) # store model if val loss improves # update loss # scheduler.step(epoch_loss) | 1.974925 | 2 |
dragontail/content/models/basicpage.py | tracon/dragontail | 0 | 8344 | <filename>dragontail/content/models/basicpage.py<gh_stars>0
# encoding: utf-8
from django.db import models
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore import blocks
from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.wagtailimages.blocks import ImageChooserBlock
class BasicPage(Page):
body = StreamField([
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
def get_template(self, request, *args, **kwargs):
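        # Resolve the template from the per-site TemplateSettings, so each site can override it.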
from .templatesettings import TemplateSettings
template_settings = TemplateSettings.for_site(request.site)
return template_settings.basic_page_template | <filename>dragontail/content/models/basicpage.py<gh_stars>0
# encoding: utf-8
from django.db import models
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore import blocks
from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.wagtailimages.blocks import ImageChooserBlock
class BasicPage(Page):
body = StreamField([
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
def get_template(self, request, *args, **kwargs):
from .templatesettings import TemplateSettings
template_settings = TemplateSettings.for_site(request.site)
return template_settings.basic_page_template | en | 0.83829 | # encoding: utf-8 | 1.944486 | 2 |
infapy/v3/agentService.py | infapy/infapy | 0 | 8345 | # Copyright (c) 2021-Present (<NAME>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests as re
import infapy
from infapy.exceptions import InvalidDetailsProvided
class AgentService():
def __init__(self,v3,v3BaseURL,v3SessionID):
self._v3 = v3
self._v3BaseURL = v3BaseURL
self._v3SessionID = v3SessionID
def updateAgentService(self,serviceName, serviceAction, agentId):
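        # POST the requested service action for the given agent to the v3 agent/service endpoint.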
url=self._v3BaseURL + "/public/core/v3/agent/service"
headers = {'Content-Type': "application/json", 'Accept': "application/json","INFA-SESSION-ID":self._v3SessionID}
body = {
'serviceName':serviceName,
'serviceAction':serviceAction,
'agentId':agentId}
infapy.log.info("agentService API URL - " + url)
infapy.log.info("API Headers: " + str(headers))
infapy.log.info("Body: " + str(body))
try:
response = re.post(url=url, json=body, headers=headers)
data = response.json()
infapy.log.debug(str(data))
try:
if ("error" in data):
infapy.log.error("Please validate the details passed")
infapy.log.error(str(data))
raise InvalidDetailsProvided
except Exception as e:
infapy.log.exception(e)
raise
except Exception as e:
infapy.log.exception(e)
raise
infapy.log.info(data["message"])
return data | # Copyright (c) 2021-Present (<NAME>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests as re
import infapy
from infapy.exceptions import InvalidDetailsProvided
class AgentService():
def __init__(self,v3,v3BaseURL,v3SessionID):
self._v3 = v3
self._v3BaseURL = v3BaseURL
self._v3SessionID = v3SessionID
def updateAgentService(self,serviceName, serviceAction, agentId):
url=self._v3BaseURL + "/public/core/v3/agent/service"
headers = {'Content-Type': "application/json", 'Accept': "application/json","INFA-SESSION-ID":self._v3SessionID}
body = {
'serviceName':serviceName,
'serviceAction':serviceAction,
'agentId':agentId}
infapy.log.info("agentService API URL - " + url)
infapy.log.info("API Headers: " + str(headers))
infapy.log.info("Body: " + str(body))
try:
response = re.post(url=url, json=body, headers=headers)
data = response.json()
infapy.log.debug(str(data))
try:
if ("error" in data):
infapy.log.error("Please validate the details passed")
infapy.log.error(str(data))
raise InvalidDetailsProvided
except Exception as e:
infapy.log.exception(e)
raise
except Exception as e:
infapy.log.exception(e)
raise
infapy.log.info(data["message"])
return data | en | 0.851108 | # Copyright (c) 2021-Present (<NAME>) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.067729 | 2 |
home_application/views.py | pengwow/test-demo | 0 | 8346 | <reponame>pengwow/test-demo
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from common.mymako import render_mako_context, render_json
from blueking.component.shortcuts import get_client_by_request
from django.views.decorators.csrf import csrf_exempt
from models import TEST, HostDisk, ScriptExecInfo
import json
import base64
def home(request):
"""
首页
"""
# yewu = [
# {'id': 1, "name": u"业务1"},
# {'id': 2, "name": u"业务2"},
# {'id': 3, "name": u"业务3"},
# ]
# 从环境配置获取APP信息,从request获取当前用户信息
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
print(result)
yewu = result['data']['info']
return render_mako_context(request, '/home_application/home.html',
{
"yewu": yewu,
"AAA": u"业务列表"
})
def submit_template(request):
"""
首页
"""
print(request.body)
return render_json({"1111111": "dddddddddd"})
def dev_guide(request):
"""
开发指引
"""
return render_mako_context(request, '/home_application/dev_guide.html')
def contactus(request):
"""
联系我们
"""
return render_mako_context(request, '/home_application/contact.html')
def tijiao(request):
data = json.loads(request.body)
print(type(data))
sss = TEST(**data)
sss.save()
return render_json({"DATA": "AAAAAAAA"})
def host_disk(request):
host_list = HostDisk.objects.all()
re_list = list()
for item in host_list:
temp_dict = dict()
temp_dict['os'] = item.os
temp_dict['host_ip'] = item.host_ip
temp_dict['host_name'] = item.host_name
temp_dict['host_path'] = item.host_path
temp_dict['create_time'] = item.create_time
re_list.append(temp_dict)
print(re_list)
return render_mako_context(request,
'/home_application/host_disk.html',
{'host_all': re_list}
)
def host_tijiao(request):
data = request.body
print(type(data))
data = json.loads(data)
host = HostDisk(**data)
host.save()
return render_json({"status": "OK"})
def host_script(request):
# 根据作业id查询日志
data = ScriptExecInfo.objects.all()
client = get_client_by_request(request)
script_all = list()
for item in data:
temp_dict = dict()
kwargs = {}
kwargs['bk_biz_id'] = item.bk_biz_id
kwargs['job_instance_id'] = item.job_instance_id
result = client.job.get_job_instance_log(kwargs)
log_content = result['data'][0]['step_results'][0]['ip_logs'][0]['log_content']
temp_dict['host_ip'] = item.host_ip
temp_dict['log_content'] = log_content
temp_dict['script_content'] = item.script_content
temp_dict['create_time'] = item.create_time
script_all.append(temp_dict)
return render_mako_context(request,
'/home_application/host_script.html',
{'script_all': script_all},
)
def script_tijiao(request):
try:
print(request.user.username)
except Exception as e:
print(str(e))
data = json.loads(request.body)
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
bk_biz_id = result['data']['info'][0]['bk_biz_id']
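    # fast_execute_script expects the script body base64-encoded, so encode it before sending.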
script_content = base64.b64encode(data['script_content'])
kwargs = dict()
kwargs['bk_biz_id'] = bk_biz_id
kwargs['script_content'] = script_content
kwargs["account"] = "root"
kwargs['ip_list'] = [{'bk_cloud_id': 0, "ip": data['host_ip']}]
result = client.job.fast_execute_script(kwargs)
script_dict = dict()
script_dict["host_ip"] = data['host_ip']
script_dict["script_content"] = data['script_content']
script_dict["job_instance_id"] = result['data']['job_instance_id']
script_dict['bk_biz_id'] = bk_biz_id
scriptexecinfo = ScriptExecInfo(**script_dict)
scriptexecinfo.save()
return render_json({"status": "OK"})
# ####################其他
def other(request):
return render_mako_context(request, '/home_application/other.html')
@csrf_exempt # 注意:需要添加此装饰器
def upload_file(request):
# 接收的为文件列表,需要遍历操作
files = request.FILES
for item in files:
_file = files.get(item)
print(_file.name)
print(_file.size)
with open('./' + str(_file.name), 'wb') as fd:
fd.write(_file.file.read())
return render_json({"status": "OK"})
def download_file(request):
"""
文件下载
:param request:
:return: 文件response
"""
from django.http import FileResponse
# 接收文件名请求
file_name = request.GET.get('filename')
fd = open('./' + file_name, 'rb')
response = FileResponse(fd)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="%s"' % file_name
return response
| # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from common.mymako import render_mako_context, render_json
from blueking.component.shortcuts import get_client_by_request
from django.views.decorators.csrf import csrf_exempt
from models import TEST, HostDisk, ScriptExecInfo
import json
import base64
def home(request):
"""
首页
"""
# yewu = [
# {'id': 1, "name": u"业务1"},
# {'id': 2, "name": u"业务2"},
# {'id': 3, "name": u"业务3"},
# ]
# 从环境配置获取APP信息,从request获取当前用户信息
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
print(result)
yewu = result['data']['info']
return render_mako_context(request, '/home_application/home.html',
{
"yewu": yewu,
"AAA": u"业务列表"
})
def submit_template(request):
"""
首页
"""
print(request.body)
return render_json({"1111111": "dddddddddd"})
def dev_guide(request):
"""
开发指引
"""
return render_mako_context(request, '/home_application/dev_guide.html')
def contactus(request):
"""
联系我们
"""
return render_mako_context(request, '/home_application/contact.html')
def tijiao(request):
data = json.loads(request.body)
print(type(data))
sss = TEST(**data)
sss.save()
return render_json({"DATA": "AAAAAAAA"})
def host_disk(request):
host_list = HostDisk.objects.all()
re_list = list()
for item in host_list:
temp_dict = dict()
temp_dict['os'] = item.os
temp_dict['host_ip'] = item.host_ip
temp_dict['host_name'] = item.host_name
temp_dict['host_path'] = item.host_path
temp_dict['create_time'] = item.create_time
re_list.append(temp_dict)
print(re_list)
return render_mako_context(request,
'/home_application/host_disk.html',
{'host_all': re_list}
)
def host_tijiao(request):
data = request.body
print(type(data))
data = json.loads(data)
host = HostDisk(**data)
host.save()
return render_json({"status": "OK"})
def host_script(request):
# 根据作业id查询日志
data = ScriptExecInfo.objects.all()
client = get_client_by_request(request)
script_all = list()
for item in data:
temp_dict = dict()
kwargs = {}
kwargs['bk_biz_id'] = item.bk_biz_id
kwargs['job_instance_id'] = item.job_instance_id
result = client.job.get_job_instance_log(kwargs)
log_content = result['data'][0]['step_results'][0]['ip_logs'][0]['log_content']
temp_dict['host_ip'] = item.host_ip
temp_dict['log_content'] = log_content
temp_dict['script_content'] = item.script_content
temp_dict['create_time'] = item.create_time
script_all.append(temp_dict)
return render_mako_context(request,
'/home_application/host_script.html',
{'script_all': script_all},
)
def script_tijiao(request):
try:
print(request.user.username)
except Exception as e:
print(str(e))
data = json.loads(request.body)
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
bk_biz_id = result['data']['info'][0]['bk_biz_id']
script_content = base64.b64encode(data['script_content'])
kwargs = dict()
kwargs['bk_biz_id'] = bk_biz_id
kwargs['script_content'] = script_content
kwargs["account"] = "root"
kwargs['ip_list'] = [{'bk_cloud_id': 0, "ip": data['host_ip']}]
result = client.job.fast_execute_script(kwargs)
script_dict = dict()
script_dict["host_ip"] = data['host_ip']
script_dict["script_content"] = data['script_content']
script_dict["job_instance_id"] = result['data']['job_instance_id']
script_dict['bk_biz_id'] = bk_biz_id
scriptexecinfo = ScriptExecInfo(**script_dict)
scriptexecinfo.save()
return render_json({"status": "OK"})
# ####################其他
def other(request):
return render_mako_context(request, '/home_application/other.html')
@csrf_exempt # 注意:需要添加此装饰器
def upload_file(request):
# 接收的为文件列表,需要遍历操作
files = request.FILES
for item in files:
_file = files.get(item)
print(_file.name)
print(_file.size)
with open('./' + str(_file.name), 'wb') as fd:
fd.write(_file.file.read())
return render_json({"status": "OK"})
def download_file(request):
"""
文件下载
:param request:
:return: 文件response
"""
from django.http import FileResponse
# 接收文件名请求
file_name = request.GET.get('filename')
fd = open('./' + file_name, 'rb')
response = FileResponse(fd)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="%s"' % file_name
return response | en | 0.69726 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available. Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 首页 # yewu = [ # {'id': 1, "name": u"业务1"}, # {'id': 2, "name": u"业务2"}, # {'id': 3, "name": u"业务3"}, # ] # 从环境配置获取APP信息,从request获取当前用户信息 首页 开发指引 联系我们 # 根据作业id查询日志 # ####################其他 # 注意:需要添加此装饰器 # 接收的为文件列表,需要遍历操作 文件下载 :param request: :return: 文件response # 接收文件名请求 | 1.761333 | 2 |
Chapter 6/09 - The built-in multiprocessing module/basic_multiprocessing.py | moseskim/Expert-Python-Programming-Fourth-Edition | 0 | 8347 | <filename>Chapter 6/09 - The built-in multiprocessing module/basic_multiprocessing.py
"""
"멀티프로세싱"절 예시
`multiprocessing` 모듈을 이용해 새로운 프로세스들을
생성하는 방법을 설명한다.
"""
from multiprocessing import Process
import os
def work(identifier):
print(f'Hey, I am the process ' f'{identifier}, pid: {os.getpid()}')
def main():
processes = [Process(target=work, args=(number,)) for number in range(5)]
for process in processes:
process.start()
while processes:
processes.pop().join()
if __name__ == "__main__":
main()
| <filename>Chapter 6/09 - The built-in multiprocessing module/basic_multiprocessing.py
"""
"멀티프로세싱"절 예시
`multiprocessing` 모듈을 이용해 새로운 프로세스들을
생성하는 방법을 설명한다.
"""
from multiprocessing import Process
import os
def work(identifier):
print(f'Hey, I am the process ' f'{identifier}, pid: {os.getpid()}')
def main():
processes = [Process(target=work, args=(number,)) for number in range(5)]
for process in processes:
process.start()
while processes:
processes.pop().join()
if __name__ == "__main__":
main()
| ko | 0.999824 | "멀티프로세싱"절 예시 `multiprocessing` 모듈을 이용해 새로운 프로세스들을 생성하는 방법을 설명한다. | 3.794383 | 4 |
sweeper/cloud/localhost/manager.py | dominoFire/sweeper | 0 | 8348 | <reponame>dominoFire/sweeper
__author__ = '@dominofire'
import os
from sweeper.cloud import resource_config_combinations
from sweeper.cloud.localhost import resource_config_factory as config_factory
from sweeper.resource import Resource
def possible_configs(num):
configs = config_factory.list_configs()
combs = resource_config_combinations(num, configs)
return combs
def create_resource(name, config_object):
res = Resource(config_object, name, 'localhost', None, None)
return res
def mount_distributed_file_system(name, vm_resources):
vm_first = vm_resources[0]
vm_first.execute_command('mkdir ./fileshare')
return os.path.join(os.getcwd(), 'fileshare')
| __author__ = '@dominofire'
import os
from sweeper.cloud import resource_config_combinations
from sweeper.cloud.localhost import resource_config_factory as config_factory
from sweeper.resource import Resource
def possible_configs(num):
configs = config_factory.list_configs()
combs = resource_config_combinations(num, configs)
return combs
def create_resource(name, config_object):
res = Resource(config_object, name, 'localhost', None, None)
return res
def mount_distributed_file_system(name, vm_resources):
vm_first = vm_resources[0]
vm_first.execute_command('mkdir ./fileshare')
return os.path.join(os.getcwd(), 'fileshare') | none | 1 | 2.060013 | 2 |
|
tfx/orchestration/experimental/core/service_jobs_test.py | BACtaki/tfx | 1,813 | 8349 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.service_jobs."""
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import test_utils
class ExceptionHandlingServiceJobManagerWrapperTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
self._mock_service_job_manager.ensure_node_services.return_value = (
service_jobs.ServiceStatus.SUCCESS)
self._mock_service_job_manager.stop_node_services.return_value = True
self._mock_service_job_manager.is_pure_service_node.return_value = True
self._mock_service_job_manager.is_mixed_service_node.return_value = False
self._wrapper = service_jobs.ExceptionHandlingServiceJobManagerWrapper(
self._mock_service_job_manager)
def test_calls_forwarded_to_underlying_instance(self):
self.assertEqual(service_jobs.ServiceStatus.SUCCESS,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self.assertTrue(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self.assertTrue(self._wrapper.is_pure_service_node(mock.Mock(), 'node3'))
self.assertFalse(self._wrapper.is_mixed_service_node(mock.Mock(), 'node4'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
self._mock_service_job_manager.is_pure_service_node.assert_called_once_with(
mock.ANY, 'node3')
self._mock_service_job_manager.is_mixed_service_node.assert_called_once_with(
mock.ANY, 'node4')
def test_ensure_node_services_exception_handling(self):
self._mock_service_job_manager.ensure_node_services.side_effect = RuntimeError(
'test error')
self.assertEqual(service_jobs.ServiceStatus.FAILED,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
def test_stop_node_services_exception_handling(self):
self._mock_service_job_manager.stop_node_services.side_effect = RuntimeError(
'test error')
self.assertFalse(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
if __name__ == '__main__':
tf.test.main()
| # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.service_jobs."""
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import test_utils
class ExceptionHandlingServiceJobManagerWrapperTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
self._mock_service_job_manager.ensure_node_services.return_value = (
service_jobs.ServiceStatus.SUCCESS)
self._mock_service_job_manager.stop_node_services.return_value = True
self._mock_service_job_manager.is_pure_service_node.return_value = True
self._mock_service_job_manager.is_mixed_service_node.return_value = False
self._wrapper = service_jobs.ExceptionHandlingServiceJobManagerWrapper(
self._mock_service_job_manager)
def test_calls_forwarded_to_underlying_instance(self):
self.assertEqual(service_jobs.ServiceStatus.SUCCESS,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self.assertTrue(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self.assertTrue(self._wrapper.is_pure_service_node(mock.Mock(), 'node3'))
self.assertFalse(self._wrapper.is_mixed_service_node(mock.Mock(), 'node4'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
self._mock_service_job_manager.is_pure_service_node.assert_called_once_with(
mock.ANY, 'node3')
self._mock_service_job_manager.is_mixed_service_node.assert_called_once_with(
mock.ANY, 'node4')
def test_ensure_node_services_exception_handling(self):
self._mock_service_job_manager.ensure_node_services.side_effect = RuntimeError(
'test error')
self.assertEqual(service_jobs.ServiceStatus.FAILED,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
def test_stop_node_services_exception_handling(self):
self._mock_service_job_manager.stop_node_services.side_effect = RuntimeError(
'test error')
self.assertFalse(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
if __name__ == '__main__':
tf.test.main()
| en | 0.842701 | # Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for tfx.orchestration.experimental.core.service_jobs. | 1.897286 | 2 |
dragonn/models.py | kundajelab/dragonn | 251 | 8350 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from abc import abstractmethod, ABCMeta
from dragonn.metrics import ClassificationResult
from sklearn.svm import SVC as scikit_SVC
from sklearn.tree import DecisionTreeClassifier as scikit_DecisionTree
from sklearn.ensemble import RandomForestClassifier
from keras.models import load_model
from dragonn.runtime_metrics import *
from dragonn.custom_losses import *
import warnings
warnings.filterwarnings('ignore')
def load_dragonn_model(model_string):
custom_objects={"recall":recall,
"sensitivity":recall,
"specificity":specificity,
"fpr":fpr,
"fnr":fnr,
"fdr":fdr,
"precision":precision,
"f1":f1,
"spearman_corr":spearman_corr,
"ambig_binary_crossentropy":ambig_binary_crossentropy,
"ambig_mean_squared_error":ambig_mean_squared_error}
model=load_model(model_string,custom_objects=custom_objects)
return model
class Model(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, **hyperparameters):
pass
@abstractmethod
def train(self, X, y, validation_data):
pass
@abstractmethod
def predict(self, X):
pass
def test(self, X, y):
return ClassificationResult(y, self.predict(X))
def score(self, X, y, metric):
return self.test(X, y)[metric]
class SequenceDNN(Model):
"""
Sequence DNN models.
Parameters
----------
seq_length : int, optional
length of input sequence.
keras_model : instance of keras.models.Sequential, optional
seq_length or keras_model must be specified.
num_tasks : int, optional
number of tasks. Default: 1.
num_filters : list[int] | tuple[int]
number of convolutional filters in each layer. Default: (15,).
conv_width : list[int] | tuple[int]
width of each layer's convolutional filters. Default: (15,).
pool_width : int
width of max pooling after the last layer. Default: 35.
L1 : float
strength of L1 penalty.
dropout : float
dropout probability in every convolutional layer. Default: 0.
verbose: int
Verbosity level during training. Valid values: 0, 1, 2.
Returns
-------
Compiled DNN model.
"""
def __init__(self, seq_length=None, keras_model=None,
use_RNN=False, num_tasks=1,
num_filters=(15, 15, 15), conv_width=(15, 15, 15),
pool_width=35, GRU_size=35, TDD_size=15,
L1=0, dropout=0.0, num_epochs=100, verbose=1):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Dropout, Flatten,
Permute, Reshape, TimeDistributedDense  # TimeDistributedDense is required by the use_RNN branch below
)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
self.num_tasks = num_tasks
self.num_epochs = num_epochs
self.verbose = verbose
self.train_metrics = []
self.valid_metrics = []
if keras_model is not None and seq_length is None:
self.model = keras_model
self.num_tasks = keras_model.layers[-1].output_shape[-1]
elif seq_length is not None and keras_model is None:
self.model = Sequential()
assert len(num_filters) == len(conv_width)
for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
conv_height = 4 if i == 0 else 1
self.model.add(Convolution2D(
nb_filter=nb_filter, nb_row=conv_height,
nb_col=nb_col, activation='linear',
init='he_normal', input_shape=(1, 4, seq_length),
W_regularizer=l1(L1), b_regularizer=l1(L1)))
self.model.add(Activation('relu'))
self.model.add(Dropout(dropout))
self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
if use_RNN:
num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
self.model.add(Permute((2, 1)))
self.model.add(GRU(GRU_size, return_sequences=True))
self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
self.model.add(Flatten())
self.model.add(Dense(output_dim=self.num_tasks))
self.model.add(Activation('sigmoid'))
self.model.compile(optimizer='adam', loss='binary_crossentropy')
else:
raise ValueError("Exactly one of seq_length or keras_model must be specified!")
def train(self, X, y, validation_data, early_stopping_metric='Loss',
early_stopping_patience=5, save_best_model_to_prefix=None):
if y.dtype != bool:
assert set(np.unique(y)) == {0, 1}
y = y.astype(bool)
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
if self.verbose >= 1:
print('Training model (* indicates new best result)...')
X_valid, y_valid = validation_data
early_stopping_wait = 0
best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
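# For 'Loss' a lower value is better; for every other early-stopping metric a higher value is better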
for epoch in range(1, self.num_epochs + 1):
self.model.fit(X, y, batch_size=128, nb_epoch=1,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None, verbose=self.verbose >= 2)
epoch_train_metrics = self.test(X, y)
epoch_valid_metrics = self.test(X_valid, y_valid)
self.train_metrics.append(epoch_train_metrics)
self.valid_metrics.append(epoch_valid_metrics)
if self.verbose >= 1:
print('Epoch {}:'.format(epoch))
print('Train {}'.format(epoch_train_metrics))
print('Valid {}'.format(epoch_valid_metrics), end='')
current_metric = epoch_valid_metrics[early_stopping_metric].mean()
if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
if self.verbose >= 1:
print(' *')
best_metric = current_metric
best_epoch = epoch
early_stopping_wait = 0
if save_best_model_to_prefix is not None:
self.save(save_best_model_to_prefix)
else:
if self.verbose >= 1:
print()
if early_stopping_wait >= early_stopping_patience:
break
early_stopping_wait += 1
if self.verbose >= 1:
print('Finished training after {} epochs.'.format(epoch))
if save_best_model_to_prefix is not None:
print("The best model's architecture and weights (from epoch {0}) "
'were saved to {1}.arch.json and {1}.weights.h5'.format(
best_epoch, save_best_model_to_prefix))
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
def get_sequence_filters(self):
"""
Returns 3D array of 2D sequence filters.
"""
return self.model.layers[0].get_weights()[0].squeeze(axis=1)
@staticmethod
def _plot_scores(X, output_directory, peak_width, score_func, score_name):
from dragonn.plot import plot_bases_on_ax
scores = score_func(X).squeeze(axis=2) # (num_task, num_samples, num_bases, sequence_length)
try:
os.makedirs(output_directory)
except OSError:
pass
num_tasks = len(scores)
for task_index, task_scores in enumerate(scores):
for sequence_index, sequence_scores in enumerate(task_scores):
# sequence_scores is num_bases x sequence_length
basewise_max_sequence_scores = sequence_scores.max(axis=0)
plt.clf()
figure, (top_axis, bottom_axis) = plt.subplots(2)
top_axis.plot(range(1, len(basewise_max_sequence_scores) + 1),
basewise_max_sequence_scores)
top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
peak_position = basewise_max_sequence_scores.argmax()
top_axis.axvspan(peak_position - peak_width, peak_position + peak_width,
color='grey', alpha=0.1)
peak_sequence_scores = sequence_scores[:, peak_position - peak_width :
peak_position + peak_width].T
# Set non-max letter_heights to zero
letter_heights = np.zeros_like(peak_sequence_scores)
letter_heights[np.arange(len(letter_heights)),
peak_sequence_scores.argmax(axis=1)] = \
basewise_max_sequence_scores[peak_position - peak_width :
peak_position + peak_width]
plot_bases_on_ax(letter_heights, bottom_axis)
bottom_axis.set_xticklabels(tuple(map(
str, np.arange(peak_position - peak_width, peak_position + peak_width + 1))))
bottom_axis.tick_params(axis='x', labelsize='small')
plt.xlabel('Position')
plt.ylabel('Score')
plt.savefig(os.path.join(output_directory, 'sequence_{}{}'.format(
sequence_index, '_task_{}'.format(task_index) if num_tasks > 1 else '')))
plt.close()
def plot_deeplift(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.deeplift, score_name='DeepLift')
def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.in_silico_mutagenesis, score_name='ISM')
def plot_architecture(self, output_file):
from dragonn.visualize_util import plot as plot_keras_model
plot_keras_model(self.model, output_file, show_shape=True)
def save(self, save_best_model_to_prefix):
arch_fname = save_best_model_to_prefix + '.arch.json'
weights_fname = save_best_model_to_prefix + '.weights.h5'
open(arch_fname, 'w').write(self.model.to_json())
self.model.save_weights(weights_fname, overwrite=True)
@staticmethod
def load(model_hdf5_fname=None, arch_fname=None, weights_fname=None):
if model_hdf5_fname!=None:
from keras.models import load_model
sequence_dnn=SequenceDNN(keras_model=load_model(model_hdf5_fname))
else:
from keras.models import model_from_json
model_json_string = open(arch_fname).read()
sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
if weights_fname is not None:
sequence_dnn.model.load_weights(weights_fname)
return sequence_dnn
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Flatten, TimeDistributedDense
)
from keras.layers.recurrent import GRU
self.model = Sequential()
self.model.add(GRU(gru_size, return_sequences=True,
input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
from keras.callbacks import EarlyStopping
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X, y, batch_size=128, nb_epoch=100,
validation_data=validation_data,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self, prefix='./gkmSVM', word_length=11, mismatches=3, C=1,
threads=1, cache_memory=100, verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str, (word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
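# Flatten the (flag, value) pairs into one gkmtrain option string, e.g. "-l 11 -d 3 -c 1 -T 1 -m 100 -v 4" with the defaults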
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(
('gkmtrain', self.options, pos_fname, neg_fname, self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join(['gkmpredict',
test_fname,
self.model_file,
temp_ofp.name,
threads_option])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
class SVC(Model):
def __init__(self):
self.classifier = scikit_SVC(probability=True, kernel='linear')
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
return self.classifier.predict_proba(X)[:, 1:]
class DecisionTree(Model):
def __init__(self):
self.classifier = scikit_DecisionTree()
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
predictions = np.asarray(self.classifier.predict_proba(X))[..., 1]
if len(predictions.shape) == 2: # multitask
predictions = predictions.T
else: # single-task
predictions = np.expand_dims(predictions, 1)
return predictions
class RandomForest(DecisionTree):
def __init__(self):
self.classifier = RandomForestClassifier(n_estimators=100)
| from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from abc import abstractmethod, ABCMeta
from dragonn.metrics import ClassificationResult
from sklearn.svm import SVC as scikit_SVC
from sklearn.tree import DecisionTreeClassifier as scikit_DecisionTree
from sklearn.ensemble import RandomForestClassifier
from keras.models import load_model
from dragonn.runtime_metrics import *
from dragonn.custom_losses import *
import warnings
warnings.filterwarnings('ignore')
def load_dragonn_model(model_string):
custom_objects={"recall":recall,
"sensitivity":recall,
"specificity":specificity,
"fpr":fpr,
"fnr":fnr,
"fdr":fdr,
"precision":precision,
"f1":f1,
"spearman_corr":spearman_corr,
"ambig_binary_crossentropy":ambig_binary_crossentropy,
"ambig_mean_squared_error":ambig_mean_squared_error}
model=load_model(model_string,custom_objects=custom_objects)
return model
class Model(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, **hyperparameters):
pass
@abstractmethod
def train(self, X, y, validation_data):
pass
@abstractmethod
def predict(self, X):
pass
def test(self, X, y):
return ClassificationResult(y, self.predict(X))
def score(self, X, y, metric):
return self.test(X, y)[metric]
class SequenceDNN(Model):
"""
Sequence DNN models.
Parameters
----------
seq_length : int, optional
length of input sequence.
keras_model : instance of keras.models.Sequential, optional
seq_length or keras_model must be specified.
num_tasks : int, optional
number of tasks. Default: 1.
num_filters : list[int] | tuple[int]
number of convolutional filters in each layer. Default: (15,).
conv_width : list[int] | tuple[int]
width of each layer's convolutional filters. Default: (15,).
pool_width : int
width of max pooling after the last layer. Default: 35.
L1 : float
strength of L1 penalty.
dropout : float
dropout probability in every convolutional layer. Default: 0.
verbose: int
Verbosity level during training. Valid values: 0, 1, 2.
Returns
-------
Compiled DNN model.
"""
def __init__(self, seq_length=None, keras_model=None,
use_RNN=False, num_tasks=1,
num_filters=(15, 15, 15), conv_width=(15, 15, 15),
pool_width=35, GRU_size=35, TDD_size=15,
L1=0, dropout=0.0, num_epochs=100, verbose=1):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Dropout, Flatten,
Permute, Reshape, TimeDistributedDense  # TimeDistributedDense is required by the use_RNN branch below
)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
self.num_tasks = num_tasks
self.num_epochs = num_epochs
self.verbose = verbose
self.train_metrics = []
self.valid_metrics = []
if keras_model is not None and seq_length is None:
self.model = keras_model
self.num_tasks = keras_model.layers[-1].output_shape[-1]
elif seq_length is not None and keras_model is None:
self.model = Sequential()
assert len(num_filters) == len(conv_width)
for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
conv_height = 4 if i == 0 else 1
self.model.add(Convolution2D(
nb_filter=nb_filter, nb_row=conv_height,
nb_col=nb_col, activation='linear',
init='he_normal', input_shape=(1, 4, seq_length),
W_regularizer=l1(L1), b_regularizer=l1(L1)))
self.model.add(Activation('relu'))
self.model.add(Dropout(dropout))
self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
if use_RNN:
num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
self.model.add(Permute((2, 1)))
self.model.add(GRU(GRU_size, return_sequences=True))
self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
self.model.add(Flatten())
self.model.add(Dense(output_dim=self.num_tasks))
self.model.add(Activation('sigmoid'))
self.model.compile(optimizer='adam', loss='binary_crossentropy')
else:
raise ValueError("Exactly one of seq_length or keras_model must be specified!")
def train(self, X, y, validation_data, early_stopping_metric='Loss',
early_stopping_patience=5, save_best_model_to_prefix=None):
if y.dtype != bool:
assert set(np.unique(y)) == {0, 1}
y = y.astype(bool)
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
if self.verbose >= 1:
print('Training model (* indicates new best result)...')
X_valid, y_valid = validation_data
early_stopping_wait = 0
best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
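# For 'Loss' a lower value is better; for every other early-stopping metric a higher value is better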
for epoch in range(1, self.num_epochs + 1):
self.model.fit(X, y, batch_size=128, nb_epoch=1,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None, verbose=self.verbose >= 2)
epoch_train_metrics = self.test(X, y)
epoch_valid_metrics = self.test(X_valid, y_valid)
self.train_metrics.append(epoch_train_metrics)
self.valid_metrics.append(epoch_valid_metrics)
if self.verbose >= 1:
print('Epoch {}:'.format(epoch))
print('Train {}'.format(epoch_train_metrics))
print('Valid {}'.format(epoch_valid_metrics), end='')
current_metric = epoch_valid_metrics[early_stopping_metric].mean()
if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
if self.verbose >= 1:
print(' *')
best_metric = current_metric
best_epoch = epoch
early_stopping_wait = 0
if save_best_model_to_prefix is not None:
self.save(save_best_model_to_prefix)
else:
if self.verbose >= 1:
print()
if early_stopping_wait >= early_stopping_patience:
break
early_stopping_wait += 1
if self.verbose >= 1:
print('Finished training after {} epochs.'.format(epoch))
if save_best_model_to_prefix is not None:
print("The best model's architecture and weights (from epoch {0}) "
'were saved to {1}.arch.json and {1}.weights.h5'.format(
best_epoch, save_best_model_to_prefix))
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
def get_sequence_filters(self):
"""
Returns 3D array of 2D sequence filters.
"""
return self.model.layers[0].get_weights()[0].squeeze(axis=1)
@staticmethod
def _plot_scores(X, output_directory, peak_width, score_func, score_name):
from dragonn.plot import plot_bases_on_ax
scores = score_func(X).squeeze(axis=2) # (num_task, num_samples, num_bases, sequence_length)
try:
os.makedirs(output_directory)
except OSError:
pass
num_tasks = len(scores)
for task_index, task_scores in enumerate(scores):
for sequence_index, sequence_scores in enumerate(task_scores):
# sequence_scores is num_bases x sequence_length
basewise_max_sequence_scores = sequence_scores.max(axis=0)
plt.clf()
figure, (top_axis, bottom_axis) = plt.subplots(2)
top_axis.plot(range(1, len(basewise_max_sequence_scores) + 1),
basewise_max_sequence_scores)
top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
peak_position = basewise_max_sequence_scores.argmax()
top_axis.axvspan(peak_position - peak_width, peak_position + peak_width,
color='grey', alpha=0.1)
peak_sequence_scores = sequence_scores[:, peak_position - peak_width :
peak_position + peak_width].T
# Set non-max letter_heights to zero
letter_heights = np.zeros_like(peak_sequence_scores)
letter_heights[np.arange(len(letter_heights)),
peak_sequence_scores.argmax(axis=1)] = \
basewise_max_sequence_scores[peak_position - peak_width :
peak_position + peak_width]
plot_bases_on_ax(letter_heights, bottom_axis)
bottom_axis.set_xticklabels(tuple(map(
str, np.arange(peak_position - peak_width, peak_position + peak_width + 1))))
bottom_axis.tick_params(axis='x', labelsize='small')
plt.xlabel('Position')
plt.ylabel('Score')
plt.savefig(os.path.join(output_directory, 'sequence_{}{}'.format(
sequence_index, '_task_{}'.format(task_index) if num_tasks > 1 else '')))
plt.close()
def plot_deeplift(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.deeplift, score_name='DeepLift')
def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.in_silico_mutagenesis, score_name='ISM')
def plot_architecture(self, output_file):
from dragonn.visualize_util import plot as plot_keras_model
plot_keras_model(self.model, output_file, show_shape=True)
def save(self, save_best_model_to_prefix):
arch_fname = save_best_model_to_prefix + '.arch.json'
weights_fname = save_best_model_to_prefix + '.weights.h5'
open(arch_fname, 'w').write(self.model.to_json())
self.model.save_weights(weights_fname, overwrite=True)
@staticmethod
def load(model_hdf5_fname=None, arch_fname=None, weights_fname=None):
if model_hdf5_fname!=None:
from keras.models import load_model
sequence_dnn=SequenceDNN(keras_model=load_model(model_hdf5_fname))
else:
from keras.models import model_from_json
model_json_string = open(arch_fname).read()
sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
if weights_fname is not None:
sequence_dnn.model.load_weights(weights_fname)
return sequence_dnn
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Flatten, TimeDistributedDense
)
from keras.layers.recurrent import GRU
self.model = Sequential()
self.model.add(GRU(gru_size, return_sequences=True,
input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
from keras.callbacks import EarlyStopping
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X, y, batch_size=128, nb_epoch=100,
validation_data=validation_data,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self, prefix='./gkmSVM', word_length=11, mismatches=3, C=1,
threads=1, cache_memory=100, verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str, (word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
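# Flatten the (flag, value) pairs into one gkmtrain option string, e.g. "-l 11 -d 3 -c 1 -T 1 -m 100 -v 4" with the defaults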
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(
('gkmtrain', self.options, pos_fname, neg_fname, self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join(['gkmpredict',
test_fname,
self.model_file,
temp_ofp.name,
threads_option])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
class SVC(Model):
def __init__(self):
self.classifier = scikit_SVC(probability=True, kernel='linear')
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
return self.classifier.predict_proba(X)[:, 1:]
class DecisionTree(Model):
def __init__(self):
self.classifier = scikit_DecisionTree()
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
predictions = np.asarray(self.classifier.predict_proba(X))[..., 1]
if len(predictions.shape) == 2: # multitask
predictions = predictions.T
else: # single-task
predictions = np.expand_dims(predictions, 1)
return predictions
class RandomForest(DecisionTree):
def __init__(self):
self.classifier = RandomForestClassifier(n_estimators=100) | en | 0.628045 | Sequence DNN models. Parameters ---------- seq_length : int, optional length of input sequence. keras_model : instance of keras.models.Sequential, optional seq_length or keras_model must be specified. num_tasks : int, optional number of tasks. Default: 1. num_filters : list[int] | tuple[int] number of convolutional filters in each layer. Default: (15,). conv_width : list[int] | tuple[int] width of each layer's convolutional filters. Default: (15,). pool_width : int width of max pooling after the last layer. Default: 35. L1 : float strength of L1 penalty. dropout : float dropout probability in every convolutional layer. Default: 0. verbose: int Verbosity level during training. Valida values: 0, 1, 2. Returns ------- Compiled DNN model. Returns 3D array of 2D sequence filters. # (num_task, num_samples, num_bases, sequence_length) # sequence_scores is num_bases x sequence_length # Set non-max letter_heights to zero writes sequences into fasta file Trains gkm-svm, saves model file. # create temporary fasta files # run command # wait for it to finish # remove fasta files # write test fasta file # test gkmsvm # wait for it to finish # remove fasta file # get classification results # multitask # single-task | 2.406397 | 2 |
src/mpass/mpass/migrations/0001_initial.py | haltu/velmu-mpass-demo | 0 | 8351 | <reponame>haltu/velmu-mpass-demo<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-20 08:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import parler.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AuthenticationSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('auth_id', models.CharField(max_length=128)),
('icon_url', models.CharField(blank=True, max_length=2048, null=True)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationSourceTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationSource')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationsource_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication source Translation',
},
),
migrations.CreateModel(
name='AuthenticationTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('tag_id', models.CharField(max_length=128)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationTagTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationTag')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationtag_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication tag Translation',
},
),
migrations.AddField(
model_name='authenticationsource',
name='tags',
field=models.ManyToManyField(blank=True, to='mpass.AuthenticationTag'),
),
migrations.AlterUniqueTogether(
name='authenticationtagtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='authenticationsourcetranslation',
unique_together=set([('language_code', 'master')]),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-20 08:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import parler.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AuthenticationSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('auth_id', models.CharField(max_length=128)),
('icon_url', models.CharField(blank=True, max_length=2048, null=True)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationSourceTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationSource')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationsource_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication source Translation',
},
),
migrations.CreateModel(
name='AuthenticationTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('tag_id', models.CharField(max_length=128)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationTagTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationTag')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationtag_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication tag Translation',
},
),
migrations.AddField(
model_name='authenticationsource',
name='tags',
field=models.ManyToManyField(blank=True, to='mpass.AuthenticationTag'),
),
migrations.AlterUniqueTogether(
name='authenticationtagtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='authenticationsourcetranslation',
unique_together=set([('language_code', 'master')]),
),
] | en | 0.636876 | # -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-03-20 08:34 | 1.648677 | 2 |
dgt/inference/forward_inference.py | fractalego/dgt | 3 | 8352 | <filename>dgt/inference/forward_inference.py
import logging
import random
from dgt.graph.graph_matcher import GraphWeightedMatch
from dgt.utils import graph_iterations
_logger = logging.getLogger(__name__)
def find_weight_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return 1
def clean_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
new_s = s[:start - 1] + s[end + 1:]
return new_s
except ValueError:
return s
def eliminate_spaces(line):
line = line.replace(' ', '')
line = line.replace('\t', '')
line = line.replace('\n', '')
return line
class UniqueNamesModifier:
def apply(self, g):
from ..auxiliary import get_random_name
substitution_dict = {}
for v in g.vs:
random_name = get_random_name()
old_name = v['name']
new_name = old_name + random_name
v['name'] = new_name
substitution_dict[old_name] = new_name
try:
for v in g.vs:
referring_name = v['refers_to']
if referring_name:
v['refers_to'] = substitution_dict[referring_name]
except Exception as e:
_logger.warning("Exception while substituting refers_to ID: " + str(e))
for e in g.es:
e['name'] += get_random_name()
class BaseForwardInference:
def compute(self):
return None
class ForwardInference(BaseForwardInference):
_unique = UniqueNamesModifier()
def __init__(self, data, knowledge, permutation_shift, max_depth=1):
self.permutations = permutation_shift
self.data = data
self.knowledge = knowledge
self._max_depth = max_depth
self.permutation_shift = permutation_shift
def __apply_clause_to_graph(self, rule, data, i):
drs = data.copy()
drs.visit(self._unique)
w = 1
iterations = graph_iterations(drs._g)
if not iterations:
return drs, 0
drs._g = iterations[self.permutations[i] % len(iterations)]
if not rule.gradient:
weighted_match = GraphWeightedMatch(rule.get_hypothesis(), self.knowledge._metric,
self.knowledge._relations_metric)
w = drs.visit(weighted_match)
is_match = drs.visit(rule)
if not is_match:
return drs, 0
return drs, w
def _compute_step(self, data_tuple, i):
"""
Applies all the rules to a drs
:return: all the variants of the drs after a rule match as a pair (<NEW_DRS>, <WEIGHT>)
"""
data = data_tuple[0]
prior_w = data_tuple[1]
clauses = self.knowledge.ask_rule(data)
results = []
for clause_tuple in clauses:
rule = clause_tuple[0]
rule_weight = rule.weight
prior_rules = list(data_tuple[2])
if rule in prior_rules: # A rule can be used only once per path
continue
drs, w = self.__apply_clause_to_graph(rule, data, i)
if w > 0:
prior_rules.append(rule)
prior_rules.append(drs)
results.append((drs, prior_w * w * rule_weight, prior_rules))
return results
def compute(self):
results = []
to_process = [(self.data, 1, [self.data])]
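# Each work item is (graph, accumulated weight, rules and intermediate graphs already used on this path)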
for i in range(self._max_depth):
new_results = []
for data_tuple in to_process:
new_results += self._compute_step(data_tuple, i)
if not new_results:
break
to_process = sorted(new_results, key=lambda x: -x[1])
results += to_process
results = sorted(results, key=lambda x: -x[1])
return results
| <filename>dgt/inference/forward_inference.py
import logging
import random
from dgt.graph.graph_matcher import GraphWeightedMatch
from dgt.utils import graph_iterations
_logger = logging.getLogger(__name__)
def find_weight_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return 1
def clean_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
new_s = s[:start - 1] + s[end + 1:]
return new_s
except ValueError:
return s
def eliminate_spaces(line):
line = line.replace(' ', '')
line = line.replace('\t', '')
line = line.replace('\n', '')
return line
class UniqueNamesModifier:
def apply(self, g):
from ..auxiliary import get_random_name
substitution_dict = {}
for v in g.vs:
random_name = get_random_name()
old_name = v['name']
new_name = old_name + random_name
v['name'] = new_name
substitution_dict[old_name] = new_name
try:
for v in g.vs:
referring_name = v['refers_to']
if referring_name:
v['refers_to'] = substitution_dict[referring_name]
except Exception as e:
_logger.warning("Exception while substituting refers_to ID: " + str(e))
for e in g.es:
e['name'] += get_random_name()
class BaseForwardInference:
def compute(self):
return None
class ForwardInference(BaseForwardInference):
_unique = UniqueNamesModifier()
def __init__(self, data, knowledge, permutation_shift, max_depth=1):
self.permutations = permutation_shift
self.data = data
self.knowledge = knowledge
self._max_depth = max_depth
self.permutation_shift = permutation_shift
def __apply_clause_to_graph(self, rule, data, i):
drs = data.copy()
drs.visit(self._unique)
w = 1
iterations = graph_iterations(drs._g)
if not iterations:
return drs, 0
drs._g = iterations[self.permutations[i] % len(iterations)]
if not rule.gradient:
weighted_match = GraphWeightedMatch(rule.get_hypothesis(), self.knowledge._metric,
self.knowledge._relations_metric)
w = drs.visit(weighted_match)
is_match = drs.visit(rule)
if not is_match:
return drs, 0
return drs, w
def _compute_step(self, data_tuple, i):
"""
Applies all the rules to a drs
:return: all the variants of the drs after a rule match as a pair (<NEW_DRS>, <WEIGHT>)
"""
data = data_tuple[0]
prior_w = data_tuple[1]
clauses = self.knowledge.ask_rule(data)
results = []
for clause_tuple in clauses:
rule = clause_tuple[0]
rule_weight = rule.weight
prior_rules = list(data_tuple[2])
if rule in prior_rules: # A rule can be used only once per path
continue
drs, w = self.__apply_clause_to_graph(rule, data, i)
if w > 0:
prior_rules.append(rule)
prior_rules.append(drs)
results.append((drs, prior_w * w * rule_weight, prior_rules))
return results
def compute(self):
results = []
to_process = [(self.data, 1, [self.data])]
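# Each work item is (graph, accumulated weight, rules and intermediate graphs already used on this path)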
for i in range(self._max_depth):
new_results = []
for data_tuple in to_process:
new_results += self._compute_step(data_tuple, i)
if not new_results:
break
to_process = sorted(new_results, key=lambda x: -x[1])
results += to_process
results = sorted(results, key=lambda x: -x[1])
return results
| en | 0.851442 | Applies all the rules to a drs :return: all the variants of the drs after a rule match as a pair (<NEW_DRS>, <WEIGHT>) # A rule can be used only once per path | 2.255202 | 2 |
serverPythonClient/client.py | ikekilinc/dnnSuperBinoculars | 0 | 8353 | <filename>serverPythonClient/client.py
import argparse
import cv2
import common
# from .utils.cropAtCenter import cropImageCenter
# from cropAtCenter import cropImageCenter
from gabriel_client.websocket_client import WebsocketClient
from gabriel_client.opencv_adapter import OpencvAdapter
DEFAULT_SERVER_HOST = '172.16.31.10'
DEFAULT_ZOOM_FACTOR = 10
def preprocess(frame):
# return frame
print(type(frame), frame.shape)
width, height = frame.shape[1], frame.shape[0]
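# Crop a centered window of 1/DEFAULT_ZOOM_FACTOR of the frame size to emulate a digital zoom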
left = int(width/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
top = int(height/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
right = int(width/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
bottom = int(height/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
cropped_frame = frame[top:bottom, left:right]
return cropped_frame
def produce_extras():
return None
def consume_frame(frame, _):
cv2.imshow('Image from server', frame)
cv2.waitKey(1)
def main():
common.configure_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
'source_name', nargs='?', default=common.DEFAULT_SOURCE_NAME)
parser.add_argument('server_host', nargs='?', default=DEFAULT_SERVER_HOST)
args = parser.parse_args()
capture = cv2.VideoCapture(0)
opencv_adapter = OpencvAdapter(
preprocess, produce_extras, consume_frame, capture, args.source_name)
client = WebsocketClient(
args.server_host, common.WEBSOCKET_PORT,
opencv_adapter.get_producer_wrappers(), opencv_adapter.consumer)
client.launch()
if __name__ == '__main__':
main()
| <filename>serverPythonClient/client.py
import argparse
import cv2
import common
# from .utils.cropAtCenter import cropImageCenter
# from cropAtCenter import cropImageCenter
from gabriel_client.websocket_client import WebsocketClient
from gabriel_client.opencv_adapter import OpencvAdapter
DEFAULT_SERVER_HOST = '172.16.31.10'
DEFAULT_ZOOM_FACTOR = 10
def preprocess(frame):
# return frame
print(type(frame), frame.shape)
width, height = frame.shape[1], frame.shape[0]
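# Crop a centered window of 1/DEFAULT_ZOOM_FACTOR of the frame size to emulate a digital zoom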
left = int(width/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
top = int(height/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
right = int(width/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
bottom = int(height/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
cropped_frame = frame[top:bottom, left:right]
return cropped_frame
def produce_extras():
return None
def consume_frame(frame, _):
cv2.imshow('Image from server', frame)
cv2.waitKey(1)
def main():
common.configure_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
'source_name', nargs='?', default=common.DEFAULT_SOURCE_NAME)
parser.add_argument('server_host', nargs='?', default=DEFAULT_SERVER_HOST)
args = parser.parse_args()
capture = cv2.VideoCapture(0)
opencv_adapter = OpencvAdapter(
preprocess, produce_extras, consume_frame, capture, args.source_name)
client = WebsocketClient(
args.server_host, common.WEBSOCKET_PORT,
opencv_adapter.get_producer_wrappers(), opencv_adapter.consumer)
client.launch()
if __name__ == '__main__':
main()
| en | 0.361131 | # from .utils.cropAtCenter import cropImageCenter # from cropAtCenter import cropImageCenter # return frame | 2.635863 | 3 |
src/DeepCard.API/batch.py | SharsDela/BankCardRecognize | 7 | 8354 | from api import get_result
import os
import shutil
from glob import glob
from PIL import Image
if __name__ == '__main__':
image_files = glob('./test_images/*.*')
result_dir = './test_results'
if os.path.exists(result_dir):
shutil.rmtree(result_dir)
os.mkdir(result_dir)
txt_file = os.path.join(result_dir, 'result.txt')
txt_f = open(txt_file, 'w')
for image_file in sorted(image_files):
if ".gitkeep" in image_files:
continue
print("Finded file", image_file, end=" ")
result = get_result(Image.open(image_file))
print(":", result)
txt_f.write(image_file.split('/')[-1].split('.')[0] + ':' + result + '\n')
txt_f.close() | from api import get_result
import os
import shutil
from glob import glob
from PIL import Image
if __name__ == '__main__':
image_files = glob('./test_images/*.*')
result_dir = './test_results'
if os.path.exists(result_dir):
shutil.rmtree(result_dir)
os.mkdir(result_dir)
txt_file = os.path.join(result_dir, 'result.txt')
txt_f = open(txt_file, 'w')
for image_file in sorted(image_files):
if ".gitkeep" in image_files:
continue
print("Finded file", image_file, end=" ")
result = get_result(Image.open(image_file))
print(":", result)
txt_f.write(image_file.split('/')[-1].split('.')[0] + ':' + result + '\n')
txt_f.close() | none | 1 | 2.666684 | 3 |
|
CIM14/ENTSOE/Equipment/Core/Curve.py | MaximeBaudette/PyCIM | 58 | 8355 | # Copyright (C) 2010-2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Equipment.Core.IdentifiedObject import IdentifiedObject
class Curve(IdentifiedObject):
"""A multi-purpose curve or functional relationship between an independent variable (X-axis) and dependent (Y-axis) variables.
"""
def __init__(self, y1Unit="A", curveStyle="straightLineYValues", xUnit="A", CurveDatas=None, *args, **kw_args):
"""Initialises a new 'Curve' instance.
@param y1Unit: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param curveStyle: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
@param xUnit: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param CurveDatas: The point data values that define a curve
"""
#: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.y1Unit = y1Unit
#: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
self.curveStyle = curveStyle
#: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.xUnit = xUnit
self._CurveDatas = []
self.CurveDatas = [] if CurveDatas is None else CurveDatas
super(Curve, self).__init__(*args, **kw_args)
_attrs = ["y1Unit", "curveStyle", "xUnit"]
_attr_types = {"y1Unit": str, "curveStyle": str, "xUnit": str}
_defaults = {"y1Unit": "A", "curveStyle": "straightLineYValues", "xUnit": "A"}
_enums = {"y1Unit": "UnitSymbol", "curveStyle": "CurveStyle", "xUnit": "UnitSymbol"}
_refs = ["CurveDatas"]
_many_refs = ["CurveDatas"]
def getCurveDatas(self):
"""The point data values that define a curve
"""
return self._CurveDatas
def setCurveDatas(self, value):
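        # Detach any CurveData objects currently linked to this Curve, then link the new ones back to it.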
for x in self._CurveDatas:
x.Curve = None
for y in value:
y._Curve = self
self._CurveDatas = value
CurveDatas = property(getCurveDatas, setCurveDatas)
def addCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = self
def removeCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = None
| # Copyright (C) 2010-2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Equipment.Core.IdentifiedObject import IdentifiedObject
class Curve(IdentifiedObject):
"""A multi-purpose curve or functional relationship between an independent variable (X-axis) and dependent (Y-axis) variables.
"""
def __init__(self, y1Unit="A", curveStyle="straightLineYValues", xUnit="A", CurveDatas=None, *args, **kw_args):
"""Initialises a new 'Curve' instance.
@param y1Unit: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param curveStyle: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
@param xUnit: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param CurveDatas: The point data values that define a curve
"""
#: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.y1Unit = y1Unit
#: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
self.curveStyle = curveStyle
#: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.xUnit = xUnit
self._CurveDatas = []
self.CurveDatas = [] if CurveDatas is None else CurveDatas
super(Curve, self).__init__(*args, **kw_args)
_attrs = ["y1Unit", "curveStyle", "xUnit"]
_attr_types = {"y1Unit": str, "curveStyle": str, "xUnit": str}
_defaults = {"y1Unit": "A", "curveStyle": "straightLineYValues", "xUnit": "A"}
_enums = {"y1Unit": "UnitSymbol", "curveStyle": "CurveStyle", "xUnit": "UnitSymbol"}
_refs = ["CurveDatas"]
_many_refs = ["CurveDatas"]
def getCurveDatas(self):
"""The point data values that define a curve
"""
return self._CurveDatas
def setCurveDatas(self, value):
for x in self._CurveDatas:
x.Curve = None
for y in value:
y._Curve = self
self._CurveDatas = value
CurveDatas = property(getCurveDatas, setCurveDatas)
def addCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = self
def removeCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = None
| en | 0.534452 | # Copyright (C) 2010-2011 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. A multi-purpose curve or functional relationship between an independent variable (X-axis) and dependent (Y-axis) variables. Initialises a new 'Curve' instance. @param y1Unit: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J" @param curveStyle: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula" @param xUnit: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J" @param CurveDatas: The point data values that define a curve #: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J" #: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula" #: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J" The point data values that define a curve | 1.839324 | 2 |
fluent/syntax/errors.py | unclenachoduh/python-fluent | 0 | 8356 | from __future__ import unicode_literals
class ParseError(Exception):
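    """Error raised by the Fluent syntax parser; carries an error code, its arguments, and a human-readable message."""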
def __init__(self, code, *args):
self.code = code
self.args = args
self.message = get_error_message(code, args)
def get_error_message(code, args):
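    # Map a Fluent parse-error code to a human-readable message; unrecognised codes fall through and the code itself is returned.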
if code == 'E00001':
return 'Generic error'
if code == 'E0002':
return 'Expected an entry start'
if code == 'E0003':
return 'Expected token: "{}"'.format(args[0])
if code == 'E0004':
return 'Expected a character from range: "{}"'.format(args[0])
if code == 'E0005':
msg = 'Expected message "{}" to have a value or attributes'
return msg.format(args[0])
if code == 'E0006':
msg = 'Expected term "{}" to have a value'
return msg.format(args[0])
if code == 'E0007':
return 'Keyword cannot end with a whitespace'
if code == 'E0008':
return 'The callee has to be a simple, upper-case identifier'
if code == 'E0009':
return 'The key has to be a simple identifier'
if code == 'E0010':
return 'Expected one of the variants to be marked as default (*)'
if code == 'E0011':
return 'Expected at least one variant after "->"'
if code == 'E0012':
return 'Expected value'
if code == 'E0013':
return 'Expected variant key'
if code == 'E0014':
return 'Expected literal'
if code == 'E0015':
return 'Only one variant can be marked as default (*)'
if code == 'E0016':
return 'Message references cannot be used as selectors'
if code == 'E0017':
return 'Variants cannot be used as selectors'
if code == 'E0018':
return 'Attributes of messages cannot be used as selectors'
if code == 'E0019':
return 'Attributes of terms cannot be used as placeables'
if code == 'E0020':
return 'Unterminated string expression'
if code == 'E0021':
return 'Positional arguments must not follow named arguments'
if code == 'E0022':
return 'Named arguments must be unique'
if code == 'E0023':
return 'VariantLists are only allowed inside of other VariantLists.'
if code == 'E0024':
return 'Cannot access variants of a message.'
if code == 'E0025':
return 'Unknown escape sequence: {}'.format(args[0])
if code == 'E0026':
return 'Invalid Unicode escape sequence: {}'.format(args[0])
return code
| from __future__ import unicode_literals
class ParseError(Exception):
def __init__(self, code, *args):
self.code = code
self.args = args
self.message = get_error_message(code, args)
def get_error_message(code, args):
if code == 'E00001':
return 'Generic error'
if code == 'E0002':
return 'Expected an entry start'
if code == 'E0003':
return 'Expected token: "{}"'.format(args[0])
if code == 'E0004':
return 'Expected a character from range: "{}"'.format(args[0])
if code == 'E0005':
msg = 'Expected message "{}" to have a value or attributes'
return msg.format(args[0])
if code == 'E0006':
msg = 'Expected term "{}" to have a value'
return msg.format(args[0])
if code == 'E0007':
return 'Keyword cannot end with a whitespace'
if code == 'E0008':
return 'The callee has to be a simple, upper-case identifier'
if code == 'E0009':
return 'The key has to be a simple identifier'
if code == 'E0010':
return 'Expected one of the variants to be marked as default (*)'
if code == 'E0011':
return 'Expected at least one variant after "->"'
if code == 'E0012':
return 'Expected value'
if code == 'E0013':
return 'Expected variant key'
if code == 'E0014':
return 'Expected literal'
if code == 'E0015':
return 'Only one variant can be marked as default (*)'
if code == 'E0016':
return 'Message references cannot be used as selectors'
if code == 'E0017':
return 'Variants cannot be used as selectors'
if code == 'E0018':
return 'Attributes of messages cannot be used as selectors'
if code == 'E0019':
return 'Attributes of terms cannot be used as placeables'
if code == 'E0020':
return 'Unterminated string expression'
if code == 'E0021':
return 'Positional arguments must not follow named arguments'
if code == 'E0022':
return 'Named arguments must be unique'
if code == 'E0023':
return 'VariantLists are only allowed inside of other VariantLists.'
if code == 'E0024':
return 'Cannot access variants of a message.'
if code == 'E0025':
return 'Unknown escape sequence: {}'.format(args[0])
if code == 'E0026':
return 'Invalid Unicode escape sequence: {}'.format(args[0])
return code
| none | 1 | 2.960001 | 3 |
|
tests/test_mag.py | jdddog/mag-archiver | 0 | 8357 | # Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import os
import unittest
from unittest.mock import patch
import pendulum
from azure.common import AzureMissingResourceHttpError
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import ContainerProperties
from mag_archiver.azure import create_table
from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, \
hide_if_not_none
class TestMag(unittest.TestCase):
def test_hide_if_not_none(self):
# Test that None is returned for None
value = hide_if_not_none(None)
self.assertEqual(value, None)
# Test that 'hidden' is returned: string
value = hide_if_not_none('hello world')
self.assertEqual(value, 'hidden')
# Test that 'hidden' is returned: integer
value = hide_if_not_none(123)
self.assertEqual(value, 'hidden')
def test_make_mag_query(self):
start_date = pendulum.datetime(year=2020, month=4, day=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
# No parameters
query = make_mag_query()
self.assertEqual(query, '')
# State parameter
query = make_mag_query(state=MagState.discovered)
self.assertEqual(query, "State eq 'discovered'")
query = make_mag_query(state=MagState.archived)
self.assertEqual(query, "State eq 'archived'")
query = make_mag_query(state=MagState.done)
self.assertEqual(query, "State eq 'done'")
# Start date parameter
query = make_mag_query(start_date=start_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z'")
# End date parameter
query = make_mag_query(end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate lt datetime'2020-05-01T00:00Z'")
# Start date, end date and date type
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z' and ReleaseDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z' and DiscoveredDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z' and ArchivedDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z' and DoneDate lt "
"datetime'2020-05-01T00:00Z'")
# State, start date, end date and date type
query = make_mag_query(state=MagState.discovered, start_date=start_date, end_date=end_date,
date_type=MagDateType.discovered)
self.assertEqual(query, "State eq 'discovered' and DiscoveredDate ge datetime'2020-04-01T00:00Z' "
"and DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.archived, start_date=start_date, end_date=end_date,
date_type=MagDateType.archived)
self.assertEqual(query, "State eq 'archived' and ArchivedDate ge datetime'2020-04-01T00:00Z' "
"and ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.done, start_date=start_date, end_date=end_date,
date_type=MagDateType.done)
self.assertEqual(query, "State eq 'done' and DoneDate ge datetime'2020-04-01T00:00Z' "
"and DoneDate lt datetime'2020-05-01T00:00Z'")
def make_mag_release(account_name: str, account_key: str, year: int, month: int, day: int):
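    # 1601-01-01 is the minimum datetime accepted by Azure Table storage; it is used below as a placeholder for the archived/done dates that have not been set yet.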
min_date = pendulum.datetime(1601, 1, 1)
partition_key_ = 'mag'
row_key_ = f'mag-{year:0>4d}-{month:0>2d}-{day:0>2d}'
state_ = MagState.discovered
task_ = MagTask.not_started
release_date_ = pendulum.datetime(year=year, month=month, day=day)
source_container_ = row_key_
source_container_last_modified_ = pendulum.datetime(year=year, month=month, day=day, hour=1)
release_container_ = ''
release_path_ = ''
discovered_date_ = pendulum.datetime(year=year, month=month, day=day, hour=2)
archived_date_ = min_date
done_date_ = min_date
return MagRelease(partition_key_, row_key_, state_, task_, release_date_, source_container_,
source_container_last_modified_, release_container_, release_path_, discovered_date_,
archived_date_, done_date_, account_name=account_name, account_key=account_key)
class TestMagRelease(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagRelease, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
release = make_mag_release(account_name, secret, 2020, 1, 1)
self.assertIn('account_key=hidden', release.__repr__())
self.assertNotIn(secret, release.__str__())
self.assertNotIn(secret, release.__repr__())
# Check that account_key is None
release = make_mag_release(account_name, None, 2020, 1, 1)
self.assertIn('account_key=None', release.__repr__())
def test_create(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
success = release.create()
self.assertTrue(success)
finally:
release.delete()
def test_delete(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
# Check that we can create and then delete
release.create()
release.delete()
# Check that second delete fails
with self.assertRaises(AzureMissingResourceHttpError):
release.delete()
def test_update(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
release.create()
# Update release
release.state = MagState.archived
release.archived_date = pendulum.utcnow().microsecond_(0)
release.update()
# Verify that release is updated
service = TableService(account_name=self.account_name, account_key=self.account_key)
entity = service.get_entity(MagRelease.TABLE_NAME, release.partition_key, release.row_key)
updated_release = MagRelease.from_entity(entity)
self.assertEqual(release.state, updated_release.state)
self.assertEqual(release.archived_date, updated_release.archived_date)
finally:
release.delete()
def make_containers():
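    # Build mock blob containers with deliberately out-of-order dates so the sort-order assertions in the tests are meaningful.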
containers = []
cp1 = ContainerProperties()
cp1.name = 'mag-2020-04-17'
cp1.last_modified = pendulum.datetime(year=2020, month=4, day=18)
containers.append(cp1)
cp3 = ContainerProperties()
cp3.name = 'mag-2020-05-01'
cp3.last_modified = pendulum.datetime(year=2020, month=5, day=1)
containers.append(cp3)
cp2 = ContainerProperties()
cp2.name = 'mag-2020-04-24'
cp2.last_modified = pendulum.datetime(year=2020, month=4, day=25)
containers.append(cp2)
return containers
class TestMagArchiverClient(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagArchiverClient, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
client = MagArchiverClient(account_name=account_name, account_key=secret, sas_token=secret)
expected = f'MagArchiverClient(account_name={account_name}, account_key=hidden, sas_token=hidden)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
self.assertNotIn(secret, client.__str__())
self.assertNotIn(secret, client.__repr__())
# Check that account_key and sas_token are None
client = MagArchiverClient(account_name=account_name)
expected = f'MagArchiverClient(account_name={account_name}, account_key=None, sas_token=None)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_containers(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Test that 2 containers are returned when last_modified_thresh=1
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers_out = client.list_containers(last_modified_thresh=1)
self.assertEqual(len(containers_out), 2)
# Test that 3 containers are returned when last_modified_thresh=0
containers_out = client.list_containers(last_modified_thresh=0)
self.assertEqual(len(containers_out), 3)
# Test sort order reverse=False
self.assertEqual(containers_in[0].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[1].name, containers_out[2].name)
# Test sort order reverse=True
containers_out = client.list_containers(last_modified_thresh=0, reverse=True)
self.assertEqual(len(containers_out), 3)
self.assertEqual(containers_in[1].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[0].name, containers_out[2].name)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_update_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 2)
self.assertEqual(num_errors, 0)
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, hour=1)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 3)
self.assertEqual(num_errors, 0)
# Two releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 2)
# 1 release
start_date = pendulum.datetime(year=2020, month=4, day=17, minute=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 1)
# Three releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1, minute=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release, reverse=False)
self.assertEqual(len(releases), 3)
# Sorting reverse=False
self.assertEqual(releases[0].row_key, '2020-04-17')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-05-01')
# Sorting reverse=True
releases = client.list_releases(start_date=start_date, end_date=end_date,
state=MagState.discovered, date_type=MagDateType.release,
reverse=True)
self.assertEqual(releases[0].row_key, '2020-05-01')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-04-17')
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
| # Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import os
import unittest
from unittest.mock import patch
import pendulum
from azure.common import AzureMissingResourceHttpError
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import ContainerProperties
from mag_archiver.azure import create_table
from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, \
hide_if_not_none
class TestMag(unittest.TestCase):
def test_hide_if_not_none(self):
# Test that None is returned for None
value = hide_if_not_none(None)
self.assertEqual(value, None)
# Test that 'hidden' is returned: string
value = hide_if_not_none('hello world')
self.assertEqual(value, 'hidden')
# Test that 'hidden' is returned: integer
value = hide_if_not_none(123)
self.assertEqual(value, 'hidden')
def test_make_mag_query(self):
start_date = pendulum.datetime(year=2020, month=4, day=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
# No parameters
query = make_mag_query()
self.assertEqual(query, '')
# State parameter
query = make_mag_query(state=MagState.discovered)
self.assertEqual(query, "State eq 'discovered'")
query = make_mag_query(state=MagState.archived)
self.assertEqual(query, "State eq 'archived'")
query = make_mag_query(state=MagState.done)
self.assertEqual(query, "State eq 'done'")
# Start date parameter
query = make_mag_query(start_date=start_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z'")
# End date parameter
query = make_mag_query(end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate lt datetime'2020-05-01T00:00Z'")
# Start date, end date and date type
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z' and ReleaseDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z' and DiscoveredDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z' and ArchivedDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z' and DoneDate lt "
"datetime'2020-05-01T00:00Z'")
# State, start date, end date and date type
query = make_mag_query(state=MagState.discovered, start_date=start_date, end_date=end_date,
date_type=MagDateType.discovered)
self.assertEqual(query, "State eq 'discovered' and DiscoveredDate ge datetime'2020-04-01T00:00Z' "
"and DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.archived, start_date=start_date, end_date=end_date,
date_type=MagDateType.archived)
self.assertEqual(query, "State eq 'archived' and ArchivedDate ge datetime'2020-04-01T00:00Z' "
"and ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.done, start_date=start_date, end_date=end_date,
date_type=MagDateType.done)
self.assertEqual(query, "State eq 'done' and DoneDate ge datetime'2020-04-01T00:00Z' "
"and DoneDate lt datetime'2020-05-01T00:00Z'")
def make_mag_release(account_name: str, account_key: str, year: int, month: int, day: int):
min_date = pendulum.datetime(1601, 1, 1)
partition_key_ = 'mag'
row_key_ = f'mag-{year:0>4d}-{month:0>2d}-{day:0>2d}'
state_ = MagState.discovered
task_ = MagTask.not_started
release_date_ = pendulum.datetime(year=year, month=month, day=day)
source_container_ = row_key_
source_container_last_modified_ = pendulum.datetime(year=year, month=month, day=day, hour=1)
release_container_ = ''
release_path_ = ''
discovered_date_ = pendulum.datetime(year=year, month=month, day=day, hour=2)
archived_date_ = min_date
done_date_ = min_date
return MagRelease(partition_key_, row_key_, state_, task_, release_date_, source_container_,
source_container_last_modified_, release_container_, release_path_, discovered_date_,
archived_date_, done_date_, account_name=account_name, account_key=account_key)
class TestMagRelease(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagRelease, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
release = make_mag_release(account_name, secret, 2020, 1, 1)
self.assertIn('account_key=hidden', release.__repr__())
self.assertNotIn(secret, release.__str__())
self.assertNotIn(secret, release.__repr__())
# Check that account_key is None
release = make_mag_release(account_name, None, 2020, 1, 1)
self.assertIn('account_key=None', release.__repr__())
def test_create(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
success = release.create()
self.assertTrue(success)
finally:
release.delete()
def test_delete(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
# Check that we can create and then delete
release.create()
release.delete()
# Check that second delete fails
with self.assertRaises(AzureMissingResourceHttpError):
release.delete()
def test_update(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
release.create()
# Update release
release.state = MagState.archived
release.archived_date = pendulum.utcnow().microsecond_(0)
release.update()
# Verify that release is updated
service = TableService(account_name=self.account_name, account_key=self.account_key)
entity = service.get_entity(MagRelease.TABLE_NAME, release.partition_key, release.row_key)
updated_release = MagRelease.from_entity(entity)
self.assertEqual(release.state, updated_release.state)
self.assertEqual(release.archived_date, updated_release.archived_date)
finally:
release.delete()
def make_containers():
containers = []
cp1 = ContainerProperties()
cp1.name = 'mag-2020-04-17'
cp1.last_modified = pendulum.datetime(year=2020, month=4, day=18)
containers.append(cp1)
cp3 = ContainerProperties()
cp3.name = 'mag-2020-05-01'
cp3.last_modified = pendulum.datetime(year=2020, month=5, day=1)
containers.append(cp3)
cp2 = ContainerProperties()
cp2.name = 'mag-2020-04-24'
cp2.last_modified = pendulum.datetime(year=2020, month=4, day=25)
containers.append(cp2)
return containers
class TestMagArchiverClient(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagArchiverClient, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
client = MagArchiverClient(account_name=account_name, account_key=secret, sas_token=secret)
expected = f'MagArchiverClient(account_name={account_name}, account_key=hidden, sas_token=hidden)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
self.assertNotIn(secret, client.__str__())
self.assertNotIn(secret, client.__repr__())
# Check that account_key and sas_token are None
client = MagArchiverClient(account_name=account_name)
expected = f'MagArchiverClient(account_name={account_name}, account_key=None, sas_token=None)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_containers(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Test that 2 containers are returned when last_modified_thresh=1
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers_out = client.list_containers(last_modified_thresh=1)
self.assertEqual(len(containers_out), 2)
# Test that 3 containers are returned when last_modified_thresh=0
containers_out = client.list_containers(last_modified_thresh=0)
self.assertEqual(len(containers_out), 3)
# Test sort order reverse=False
self.assertEqual(containers_in[0].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[1].name, containers_out[2].name)
# Test sort order reverse=True
containers_out = client.list_containers(last_modified_thresh=0, reverse=True)
self.assertEqual(len(containers_out), 3)
self.assertEqual(containers_in[1].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[0].name, containers_out[2].name)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_update_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 2)
self.assertEqual(num_errors, 0)
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, hour=1)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 3)
self.assertEqual(num_errors, 0)
# Two releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 2)
# 1 release
start_date = pendulum.datetime(year=2020, month=4, day=17, minute=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 1)
# Three releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1, minute=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release, reverse=False)
self.assertEqual(len(releases), 3)
# Sorting reverse=False
self.assertEqual(releases[0].row_key, '2020-04-17')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-05-01')
# Sorting reverse=True
releases = client.list_releases(start_date=start_date, end_date=end_date,
state=MagState.discovered, date_type=MagDateType.release,
reverse=True)
self.assertEqual(releases[0].row_key, '2020-05-01')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-04-17')
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
| en | 0.833932 | # Copyright 2020 Curtin University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Author: <NAME> # Test that None is returned for None # Test that 'hidden' is returned: string # Test that 'hidden' is returned: integer # No parameters # State parameter # Start date parameter # End date parameter # Start date, end date and date type # State, start date, end date and date type # Check that account key is hidden # Check that account_key and sas_token are hidden # Check that account_key is None # Check that we can create and then delete # Check that second delete fails # Update release # Verify that release is updated # Check that account key is hidden # Check that account_key and sas_token are hidden # Check that account_key and sas_token are None # Mock time # Mock containers # Test that 2 containers are returned when last_modified_thresh=1 # Test that 3 containers are returned when last_modified_thresh=0 # Test sort order reverse=False # Test sort order reverse=True # Mock time # Mock containers # Mock fetching of containers # Update releases based on containers # Clean up # Mock time # Mock containers # Mock fetching of containers # Update releases based on containers # Two releases # 1 release # Three releases # Sorting reverse=False # Sorting reverse=True # Clean up | 2.071854 | 2 |
twitterinfrastructure/CH-Data-Public.py | jacob-heglund/socialsensing-jh | 0 | 8358 | '''
Created on Mar 22, 2018
Edited on Jan 11, 2019
@author: npvance2
@author: curtisd2
Variables that will need to be edited/personalized:
monitorID in Variables() (line 27)
projectStartDate in Variables() (line 28)
projectEndDate in Variables() (line 29)
authToken in getAuthToken() (line 49)
consumer_key in twitterAPI() (line 62)
consumer_secret in twitterAPI() (line 63)
access_token in twitterAPI() (line 64)
access_secret in twitterAPI() (line 65)
'''
from datetime import date, timedelta
import urllib.request
import json
import csv
import tweepy
from tweepy import OAuthHandler
def Variables():
monitorID = "9926183772" # The numerical ID for your Crimson Hexagon monitor
startDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
endDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
variableMap = {}
variableMap['monitorID'] = monitorID
variableMap['startDate'] = startDate
variableMap['endDate'] = endDate
return variableMap
def getURL(): #provides URL for Crimson API
urlStart = "https://api.crimsonhexagon.com/api"
return urlStart
###########
#
# You'll need to generate your own Crimson API key/token from here:
# https://apidocs.crimsonhexagon.com/reference
#
###########
def getAuthToken(): #provides auth token needed to access Crimson API
authToken = ''
authToken = "&auth="+authToken
return authToken
###########
#
# You'll need to add your own Twitter API keys here.
# Instructions on generating API keys: https://developer.twitter.com/en/docs/basics/authentication/guides/access-tokens.html
# API reference guide: https://developer.twitter.com/en/docs/api-reference-index.html
#
###########
def twitterAPI(): #Provides access keys for Twitter API
    consumer_key = '<KEY>'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_secret = '<KEY>'
if (consumer_key == '') or (consumer_secret =='') or (access_token =='') or (access_secret ==''):
print("Not all Twitter keys have been entered, please add them to the script and try again")
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
return api
def getTwitterURL(): #provides URL for Twitter api
urlStart = "https://api.twitter.com/1.1/statuses/lookup.json?id="
return urlStart
def DatePull(startdate, enddate):
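    # Return a list of ISO-format date strings (yyyy-mm-dd), one per day from startdate to enddate inclusive.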
listArray = []
startdate = date(int(startdate[0:4]), int(startdate[5:7]), int(startdate[8:10]))
enddate = date(int(enddate[0:4]), int(enddate[5:7]), int(enddate[8:10]))
while startdate <= enddate:
listArray.append(str(startdate))
startdate += timedelta(days=1)
return listArray
def main():
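    # Pull posts day-by-day from the Crimson Hexagon monitor, enrich Twitter posts via the Twitter API, and write everything to a single CSV.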
monitorID = Variables()['monitorID']
projectStartDate = Variables()['startDate']
projectEndDate = Variables()['endDate']
fPath = "Monitor-"+monitorID+'-from-'+projectStartDate+'-to-'+projectEndDate+'.csv'
lineArray = DatePull(projectStartDate, projectEndDate)
print("------------------------------")
print("MonitorID is "+monitorID)
print(lineArray[0],lineArray[-1])
with open(fPath, 'w', newline = '', encoding = 'utf-8') as f:
writer = csv.writer(f)
header = ["PostType","PostDate","PostTime","URL","TweetID","Contents","RetweetCount","FavoriteCount","Location","Language","Sentiment","NeutralScore","PositiveScore","NegativeScore","Followers","Friends","Author","AuthorGender","AuthorTweets"]
writer.writerow(header)
for i in range(len(lineArray)-1):
print(lineArray[i])
startDate = lineArray[i]
endDate = lineArray[i+1]
dates = "&start="+startDate+"&end="+endDate #Combines start and end date into format needed for API call
urlStart = getURL() #Gets URL
authToken = getAuthToken() #Gets auth token
endpoint = "/monitor/posts?id="; #endpoint needed for this query
extendLimit = "&extendLimit=true" #extends call number from 500 to 10,000
fullContents = "&fullContents=true" #Brings back full contents for Blog and Tumblr posts which are usually truncated around search keywords. This can occasionally disrupt CSV formatting.
urlData = urlStart+endpoint+monitorID+authToken+dates+extendLimit+fullContents #Combines all API calls parts into full URL
webURL = urllib.request.urlopen(urlData)
if (webURL.getcode() == 200):
with open(fPath, 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
data = webURL.read().decode('utf8')
theJSON = json.loads(data)
postDates = [] #These initialize the attributes of the final output
postTimes = []
urls = []
contents = []
authors = []
authorGenders = []
locations = []
languages = []
postTypes = []
sentiments = []
neutralScore = []
positiveScore = []
negativeScore = []
tweetIDs = []
followers = []
friends = []
retweetCounts = []
favoritesCount = []
statusesCount = []
tweetCount = 0
tempTweetIDs = []
api = twitterAPI()
c = 0
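                # Walk every post returned by Crimson Hexagon; Twitter posts are collected into batches of 100 tweet IDs and hydrated through the Twitter statuses_lookup API.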
for i in theJSON["posts"]:
postDates.append("")
postTimes.append("")
if ('date' in i): #identifies date posted
tempDate = str(i["date"])
dateTime = tempDate.split("T")
postDates[c] = dateTime[0]
postTimes[c] = dateTime[1]
urls.append(i["url"])
contents.append("")
if ('contents' in i): #identifies post contents
contents[c] = i["contents"].replace(",","").replace("\n"," ") #replaces commas and new lines to facilitate CSV formatting, this occasionally missed new lines in some blog posts which I'm working to fix
authors.append("")
if ('author' in i): #identifies author
authors[c] = i["author"].replace(",","")
authorGenders.append("")
if ('authorGender' in i): #identifies author gender
authorGenders[c] = i["authorGender"]
locations.append("")
if ('location' in i): #identifies location
locations[c] = i["location"].replace(",","")
languages.append("")
if ('language' in i): #identifies language specified in the author's profile
languages[c] = i["language"]
postTypes.append(i["type"]) #identifies the type of post, i.e. Twitter, Tumblr, Blog
tweetIDs.append("")
followers.append("")
friends.append("")
retweetCounts.append("")
favoritesCount.append("")
statusesCount.append("")
if postTypes[c] == "Twitter": #if the post type is Twitter it goes through more processing
tweetCount = tweetCount + 1 #counts number of tweets
tweetSplit = urls[c].split("status/") #splits URL to get tweetID
tweetIDs[c] = tweetSplit[1]
tempTweetIDs.append(tweetIDs[c])
if tweetCount == 100: #the max number of TweetIDs in one API call is 100 so a call is run every 100 tweets identified
tweepys = api.statuses_lookup(id_=tempTweetIDs) #call to Twitter API
for tweet in tweepys:
tempID = tweet.id_str #finds tweetsID
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID: #matches tweetID in Twitter API call to tweetID stored from Crimson API
tempDate = str(tweet.created_at).replace(" "," ") #These all fill the matching Crimson attributes to those found in the Twitter API
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0 #clears tweet count for a new 100
tempTweetIDs = [] #clears tweetIDs for next call
sentiments.append("")
neutralScore.append("")
positiveScore.append("")
negativeScore.append("")
if ('categoryScores' in i): #finds sentiment value and matching attribute
for l in i["categoryScores"]:
catName = l["categoryName"]
if catName == "Basic Neutral":
neutralScore[c] = l["score"]
elif catName =="Basic Positive":
positiveScore[c] = l["score"]
elif catName == "Basic Negative":
negativeScore[c] = l["score"]
if neutralScore[c] > positiveScore[c] and neutralScore[c] > negativeScore[c]:
sentiments[c] = "Basic Neutral"
if positiveScore[c] > neutralScore[c] and positiveScore[c] > negativeScore[c]:
sentiments[c] = "Basic Positive"
if negativeScore[c] > positiveScore[c] and negativeScore[c] > neutralScore[c]:
sentiments[c] = "Basic Negative"
c = c + 1
if len(tempTweetIDs) != 0: #after loop the Twitter API call must run one more time to clean up all the tweets since the last 100
try:
tweepys = api.statuses_lookup(id_=tempTweetIDs)
for tweet in tweepys:
tempID = tweet.id_str
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID:
tempDate = str(tweet.created_at).replace(" "," ")
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0
except:
print("Tweepy error: skipping cleanup")
pC = 0
for pDate in postDates: #iterates through the word lists and prints matching posts to CSV
csvRow=[postTypes[pC], pDate, postTimes[pC], urls[pC], str(tweetIDs[pC]), contents[pC].replace("\n"," "), retweetCounts[pC], favoritesCount[pC], locations[pC], languages[pC], sentiments[pC], str(neutralScore[pC]), str(positiveScore[pC]), str(negativeScore[pC]), followers[pC], friends[pC], authors[pC], authorGenders[pC], statusesCount[pC]]
writer.writerow(csvRow)
pC = pC + 1
else:
print("Server Error, No Data" + str(webURL.getcode())) #displays error if Crimson URL fails
if __name__ == '__main__':
main()
| '''
Created on Mar 22, 2018
Edited on Jan 11, 2019
@author: npvance2
@author: curtisd2
Variables that will need to be edited/personalized:
monitorID in Variables() (line 27)
projectStartDate in Variables() (line 28)
projectEndDate in Variables() (line 29)
authToken in getAuthToken() (line 49)
consumer_key in twitterAPI() (line 62)
consumer_secret in twitterAPI() (line 63)
access_token in twitterAPI() (line 64)
access_secret in twitterAPI() (line 65)
'''
from datetime import date, timedelta
import urllib.request
import json
import csv
import tweepy
from tweepy import OAuthHandler
def Variables():
monitorID = "9926183772" # The numerical ID for your Crimson Hexagon monitor
startDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
endDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
variableMap = {}
variableMap['monitorID'] = monitorID
variableMap['startDate'] = startDate
variableMap['endDate'] = endDate
return variableMap
def getURL(): #provides URL for Crimson API
urlStart = "https://api.crimsonhexagon.com/api"
return urlStart
###########
#
# You'll need to generate your own Crimson API key/token from here:
# https://apidocs.crimsonhexagon.com/reference
#
###########
def getAuthToken(): #provides auth token needed to access Crimson API
authToken = ''
authToken = "&auth="+authToken
return authToken
###########
#
# You'll need to add your own Twitter API keys here.
# Instructions on generating API keys: https://developer.twitter.com/en/docs/basics/authentication/guides/access-tokens.html
# API reference guide: https://developer.twitter.com/en/docs/api-reference-index.html
#
###########
def twitterAPI(): #Provides access keys for Twitter API
consumer_key = '2S1Z7Giq0oOf3w0R0sJUPnLFx'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_secret = '<KEY>'
if (consumer_key == '') or (consumer_secret =='') or (access_token =='') or (access_secret ==''):
print("Not all Twitter keys have been entered, please add them to the script and try again")
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
return api
def getTwitterURL(): #provides URL for Twitter api
urlStart = "https://api.twitter.com/1.1/statuses/lookup.json?id="
return urlStart
def DatePull(startdate, enddate):
listArray = []
startdate = date(int(startdate[0:4]), int(startdate[5:7]), int(startdate[8:10]))
enddate = date(int(enddate[0:4]), int(enddate[5:7]), int(enddate[8:10]))
while startdate <= enddate:
listArray.append(str(startdate))
startdate += timedelta(days=1)
return listArray
def main():
monitorID = Variables()['monitorID']
projectStartDate = Variables()['startDate']
projectEndDate = Variables()['endDate']
fPath = "Monitor-"+monitorID+'-from-'+projectStartDate+'-to-'+projectEndDate+'.csv'
lineArray = DatePull(projectStartDate, projectEndDate)
print("------------------------------")
print("MonitorID is "+monitorID)
print(lineArray[0],lineArray[-1])
with open(fPath, 'w', newline = '', encoding = 'utf-8') as f:
writer = csv.writer(f)
header = ["PostType","PostDate","PostTime","URL","TweetID","Contents","RetweetCount","FavoriteCount","Location","Language","Sentiment","NeutralScore","PositiveScore","NegativeScore","Followers","Friends","Author","AuthorGender","AuthorTweets"]
writer.writerow(header)
for i in range(len(lineArray)-1):
print(lineArray[i])
startDate = lineArray[i]
endDate = lineArray[i+1]
dates = "&start="+startDate+"&end="+endDate #Combines start and end date into format needed for API call
urlStart = getURL() #Gets URL
authToken = getAuthToken() #Gets auth token
endpoint = "/monitor/posts?id="; #endpoint needed for this query
extendLimit = "&extendLimit=true" #extends call number from 500 to 10,000
fullContents = "&fullContents=true" #Brings back full contents for Blog and Tumblr posts which are usually truncated around search keywords. This can occasionally disrupt CSV formatting.
urlData = urlStart+endpoint+monitorID+authToken+dates+extendLimit+fullContents #Combines all API calls parts into full URL
webURL = urllib.request.urlopen(urlData)
if (webURL.getcode() == 200):
with open(fPath, 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
data = webURL.read().decode('utf8')
theJSON = json.loads(data)
postDates = [] #These initialize the attributes of the final output
postTimes = []
urls = []
contents = []
authors = []
authorGenders = []
locations = []
languages = []
postTypes = []
sentiments = []
neutralScore = []
positiveScore = []
negativeScore = []
tweetIDs = []
followers = []
friends = []
retweetCounts = []
favoritesCount = []
statusesCount = []
tweetCount = 0
tempTweetIDs = []
api = twitterAPI()
c = 0
for i in theJSON["posts"]:
postDates.append("")
postTimes.append("")
if ('date' in i): #identifies date posted
tempDate = str(i["date"])
dateTime = tempDate.split("T")
postDates[c] = dateTime[0]
postTimes[c] = dateTime[1]
urls.append(i["url"])
contents.append("")
if ('contents' in i): #identifies post contents
contents[c] = i["contents"].replace(",","").replace("\n"," ") #replaces commas and new lines to facilitate CSV formatting, this occasionally missed new lines in some blog posts which I'm working to fix
authors.append("")
if ('author' in i): #identifies author
authors[c] = i["author"].replace(",","")
authorGenders.append("")
if ('authorGender' in i): #identifies author gender
authorGenders[c] = i["authorGender"]
locations.append("")
if ('location' in i): #identifies location
locations[c] = i["location"].replace(",","")
languages.append("")
if ('language' in i): #identifies language specified in the author's profile
languages[c] = i["language"]
postTypes.append(i["type"]) #identifies the type of post, i.e. Twitter, Tumblr, Blog
tweetIDs.append("")
followers.append("")
friends.append("")
retweetCounts.append("")
favoritesCount.append("")
statusesCount.append("")
if postTypes[c] == "Twitter": #if the post type is Twitter it goes through more processing
tweetCount = tweetCount + 1 #counts number of tweets
tweetSplit = urls[c].split("status/") #splits URL to get tweetID
tweetIDs[c] = tweetSplit[1]
tempTweetIDs.append(tweetIDs[c])
if tweetCount == 100: #the max number of TweetIDs in one API call is 100 so a call is run every 100 tweets identified
tweepys = api.statuses_lookup(id_=tempTweetIDs) #call to Twitter API
for tweet in tweepys:
tempID = tweet.id_str #finds tweetsID
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID: #matches tweetID in Twitter API call to tweetID stored from Crimson API
tempDate = str(tweet.created_at).replace(" "," ") #These all fill the matching Crimson attributes to those found in the Twitter API
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0 #clears tweet count for a new 100
tempTweetIDs = [] #clears tweetIDs for next call
sentiments.append("")
neutralScore.append("")
positiveScore.append("")
negativeScore.append("")
if ('categoryScores' in i): #finds sentiment value and matching attribute
for l in i["categoryScores"]:
catName = l["categoryName"]
if catName == "Basic Neutral":
neutralScore[c] = l["score"]
elif catName =="Basic Positive":
positiveScore[c] = l["score"]
elif catName == "Basic Negative":
negativeScore[c] = l["score"]
if neutralScore[c] > positiveScore[c] and neutralScore[c] > negativeScore[c]:
sentiments[c] = "Basic Neutral"
if positiveScore[c] > neutralScore[c] and positiveScore[c] > negativeScore[c]:
sentiments[c] = "Basic Positive"
if negativeScore[c] > positiveScore[c] and negativeScore[c] > neutralScore[c]:
sentiments[c] = "Basic Negative"
c = c + 1
if len(tempTweetIDs) != 0: #after loop the Twitter API call must run one more time to clean up all the tweets since the last 100
try:
tweepys = api.statuses_lookup(id_=tempTweetIDs)
for tweet in tweepys:
tempID = tweet.id_str
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID:
tempDate = str(tweet.created_at).replace(" "," ")
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0
                    except Exception as err:
                        print("Tweepy error, skipping cleanup: " + str(err))
pC = 0
for pDate in postDates: #iterates through the word lists and prints matching posts to CSV
csvRow=[postTypes[pC], pDate, postTimes[pC], urls[pC], str(tweetIDs[pC]), contents[pC].replace("\n"," "), retweetCounts[pC], favoritesCount[pC], locations[pC], languages[pC], sentiments[pC], str(neutralScore[pC]), str(positiveScore[pC]), str(negativeScore[pC]), followers[pC], friends[pC], authors[pC], authorGenders[pC], statusesCount[pC]]
writer.writerow(csvRow)
pC = pC + 1
else:
print("Server Error, No Data" + str(webURL.getcode())) #displays error if Crimson URL fails
if __name__ == '__main__':
main()
| en | 0.659623 | Created on Mar 22, 2018 Edited on Jan 11, 2019 @author: npvance2 @author: curtisd2 Variables that will need to be edited/personalized: monitorID in Variables() (line 27) projectStartDate in Variables() (line 28) projectEndDate in Variables() (line 29) authToken in getAuthToken() (line 49) consumer_key in twitterAPI() (line 62) consumer_secret in twitterAPI() (line 63) access_token in twitterAPI() (line 64) access_secret in twitterAPI() (line 65) # The numerical ID for your Crimson Hexagon monitor # Date must be in yyyy-mm-dd format # Date must be in yyyy-mm-dd format #provides URL for Crimson API ########### # # You'll need to generate your own Crimson API key/token from here: # https://apidocs.crimsonhexagon.com/reference # ########### #provides auth token needed to access Crimson API ########### # # You'll need to add your own Twitter API keys here. # Instructions on generating API keys: https://developer.twitter.com/en/docs/basics/authentication/guides/access-tokens.html # API reference guide: https://developer.twitter.com/en/docs/api-reference-index.html # ########### #Provides access keys for Twitter API #provides URL for Twitter api #Combines start and end date into format needed for API call #Gets URL #Gets auth token #endpoint needed for this query #extends call number from 500 to 10,000 #Brings back full contents for Blog and Tumblr posts which are usually truncated around search keywords. This can occasionally disrupt CSV formatting. #Combines all API calls parts into full URL #These initialize the attributes of the final output #identifies date posted #identifies post contents #replaces commas and new lines to facilitate CSV formatting, this occasionally missed new lines in some blog posts which I'm working to fix #identifies author #identifies author gender #identifies location #identifies language specified in the author's profile #identifies the type of post, i.e. Twitter, Tumblr, Blog #if the post type is Twitter it goes through more processing #counts number of tweets #splits URL to get tweetID #the max number of TweetIDs in one API call is 100 so a call is run every 100 tweets identified #call to Twitter API #finds tweetsID #matches tweetID in Twitter API call to tweetID stored from Crimson API #These all fill the matching Crimson attributes to those found in the Twitter API #clears tweet count for a new 100 #clears tweetIDs for next call #finds sentiment value and matching attribute #after loop the Twitter API call must run one more time to clean up all the tweets since the last 100 #iterates through the word lists and prints matching posts to CSV #displays error if Crimson URL fails | 2.607501 | 3 |
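The 100-ID batching that the collector above implements inline with tweetCount and tempTweetIDs can be factored into a small helper. The sketch below is illustrative and not part of the original script; the lookup callable stands in for the tweepy api.statuses_lookup call used above.

def chunked(seq, size=100):
    # Twitter's statuses lookup accepts at most 100 IDs per request.
    for start in range(0, len(seq), size):
        yield seq[start:start + size]

def lookup_all(tweet_ids, lookup):
    # Run the lookup callable over tweet_ids in batches of 100 and flatten the results.
    results = []
    for batch in chunked(tweet_ids):
        results.extend(lookup(id_=batch))
    return results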
roles/slurm/files/startnode.py | danhnguyen48/slurm-elastic-computing | 0 | 8359 | #! /opt/cloud_sdk/bin/python
import asyncio
import logging
import subprocess
import sys
import citc_cloud
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
log.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
async def main() -> None:
nodespace = citc_cloud.get_nodespace()
keys_file = "/home/slurm/opc_authorized_keys"
with open(keys_file) as kf:
ssh_keys = kf.read()
hosts = subprocess.run(["scontrol", "show", "hostnames", sys.argv[1]], stdout=subprocess.PIPE).stdout.decode().split()
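    # "scontrol show hostnames" expands the Slurm hostlist expression passed in sys.argv[1]
    # (e.g. "node[01-03]" -> node01, node02, node03); the decoded output is split into a list.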
await asyncio.gather(*(
citc_cloud.start_node( log, host, nodespace, ssh_keys)
for host in hosts
))
sys.excepthook = handle_exception
if __name__ == "__main__":
log = logging.getLogger("startnode")
log.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/slurm/elastic.log')
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
loop.close()
| #! /opt/cloud_sdk/bin/python
import asyncio
import logging
import subprocess
import sys
import citc_cloud
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
log.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
async def main() -> None:
nodespace = citc_cloud.get_nodespace()
keys_file = "/home/slurm/opc_authorized_keys"
with open(keys_file) as kf:
ssh_keys = kf.read()
hosts = subprocess.run(["scontrol", "show", "hostnames", sys.argv[1]], stdout=subprocess.PIPE).stdout.decode().split()
await asyncio.gather(*(
citc_cloud.start_node( log, host, nodespace, ssh_keys)
for host in hosts
))
sys.excepthook = handle_exception
if __name__ == "__main__":
log = logging.getLogger("startnode")
log.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/slurm/elastic.log')
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
loop.close()
| en | 0.235294 | #! /opt/cloud_sdk/bin/python | 1.878928 | 2 |
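For context, a script like startnode.py is typically wired up as Slurm's ResumeProgram, which passes the hostlist as the first argument; that is why main() reads sys.argv[1]. The fan-out itself is a plain asyncio.gather pattern. The sketch below is illustrative only, with fake_start_node standing in for citc_cloud.start_node.

import asyncio

async def fake_start_node(host: str) -> str:
    # Stand-in for citc_cloud.start_node: pretend to provision a node asynchronously.
    await asyncio.sleep(0.1)
    return host + " started"

async def start_all(hosts):
    # One coroutine per host, run concurrently, mirroring startnode.py's main().
    return await asyncio.gather(*(fake_start_node(h) for h in hosts))

print(asyncio.run(start_all(["node01", "node02", "node03"])))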
tests/pyre/components/component_class_registration_model.py | BryanRiel/pyre | 0 | 8360 | <filename>tests/pyre/components/component_class_registration_model.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that component registration interacts correctly with the pyre configurator model
"""
# access
# print(" -- importing pyre")
import pyre
# print(" -- done")
def declare():
# declare a protocol
class protocol(pyre.protocol):
"""a protocol"""
# properties
p1 = pyre.properties.str()
p2 = pyre.properties.str()
# behavior
@pyre.provides
def do(self):
"""behave"""
# declare a component
class component(pyre.component, family="test", implements=protocol):
"""a component"""
# traits
p1 = pyre.properties.str(default="p1")
p2 = pyre.properties.str(default="p2")
@pyre.export
def do(self):
"""behave"""
return "component"
return component
def test():
# and the model
model = pyre.executive.nameserver
# model.dump(pattern='test')
# print(" -- making some configuration changes")
# add an assignment
model['test.p1'] = 'step 1'
# an alias
model.alias(alias='p1', target='test.p1')
# and a reference to the alias
model['ref'] = '{p1}'
# check that they point to the same slot
assert model.retrieve(name='p1') == model.retrieve(name='test.p1')
# save the nodes
ref = model.retrieve(name='ref')
step_0 = model.retrieve(name='test.p1')
# now declare the component and its protocol
# print(" -- declaring components")
component = declare()
# print(" -- done")
# model.dump(pattern='')
assert component.p1 == 'step 1'
assert component.p2 == 'p2'
# check that the model is as we expect
# model.dump()
assert model['test.p1'] == component.p1
assert model['test.p2'] == component.p2
# how about the alias and the reference?
assert model['ref'] == component.p1
assert model['p1'] == component.p1
# make a late registration to what is now the component trait
model['test.p2'] = 'step 2'
# model.dump(pattern='test')
# and check
assert component.p1 == 'step 1'
assert component.p2 == 'step 2'
return
# main
if __name__ == "__main__":
test()
# end of file
| <filename>tests/pyre/components/component_class_registration_model.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that component registration interacts correctly with the pyre configurator model
"""
# access
# print(" -- importing pyre")
import pyre
# print(" -- done")
def declare():
# declare a protocol
class protocol(pyre.protocol):
"""a protocol"""
# properties
p1 = pyre.properties.str()
p2 = pyre.properties.str()
# behavior
@pyre.provides
def do(self):
"""behave"""
# declare a component
class component(pyre.component, family="test", implements=protocol):
"""a component"""
# traits
p1 = pyre.properties.str(default="p1")
p2 = pyre.properties.str(default="p2")
@pyre.export
def do(self):
"""behave"""
return "component"
return component
def test():
# and the model
model = pyre.executive.nameserver
# model.dump(pattern='test')
# print(" -- making some configuration changes")
# add an assignment
model['test.p1'] = 'step 1'
# an alias
model.alias(alias='p1', target='test.p1')
# and a reference to the alias
model['ref'] = '{p1}'
# check that they point to the same slot
assert model.retrieve(name='p1') == model.retrieve(name='test.p1')
# save the nodes
ref = model.retrieve(name='ref')
step_0 = model.retrieve(name='test.p1')
# now declare the component and its protocol
# print(" -- declaring components")
component = declare()
# print(" -- done")
# model.dump(pattern='')
assert component.p1 == 'step 1'
assert component.p2 == 'p2'
# check that the model is as we expect
# model.dump()
assert model['test.p1'] == component.p1
assert model['test.p2'] == component.p2
# how about the alias and the reference?
assert model['ref'] == component.p1
assert model['p1'] == component.p1
# make a late registration to what is now the component trait
model['test.p2'] = 'step 2'
# model.dump(pattern='test')
# and check
assert component.p1 == 'step 1'
assert component.p2 == 'step 2'
return
# main
if __name__ == "__main__":
test()
# end of file
| en | 0.704659 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # <NAME> # orthologue # (c) 1998-2018 all rights reserved # Verify that component registration interacts correctly with the pyre configurator model # access # print(" -- importing pyre") # print(" -- done") # declare a protocol a protocol # properties # behavior behave # declare a component a component # traits behave # and the model # model.dump(pattern='test') # print(" -- making some configuration changes") # add an assignment # an alias # and a reference to the alias # check that they point to the same slot # save the nodes # now declare the component and its protocol # print(" -- declaring components") # print(" -- done") # model.dump(pattern='') # check that the model is as we expect # model.dump() # how about the alias and the reference? # make a late registration to what is now the component trait # model.dump(pattern='test') # and check # main # end of file | 2.340419 | 2 |
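A condensed, illustrative sketch of the behaviour the pyre test above verifies: a nameserver assignment made before a component class is declared overrides the declared default. It reuses only pyre calls that appear in the file and assumes family-based binding alone is sufficient.

import pyre

ns = pyre.executive.nameserver
ns['demo.p1'] = 'configured before declaration'

class demo(pyre.component, family="demo"):
    p1 = pyre.properties.str(default="declared default")

assert demo.p1 == 'configured before declaration'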
tests/unit/transport/plugins/asyncssh/test_asyncssh_transport.py | carlmontanari/nssh | 1 | 8361 | import asyncio
from io import BytesIO
import pytest
from asyncssh.connection import SSHClientConnection
from asyncssh.stream import SSHReader
from scrapli.exceptions import ScrapliConnectionNotOpened, ScrapliTimeout
class DumbContainer:
def __init__(self):
self.preferred_auth = ()
def __getattr__(self, item):
# options has a billion attributes, just return None, doesnt matter for this test
return None
def test_close(monkeypatch, asyncssh_transport):
def _close(cls):
pass
monkeypatch.setattr(
"asyncssh.connection.SSHClientConnection.close",
_close,
)
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
asyncssh_transport.close()
assert asyncssh_transport.session is None
assert asyncssh_transport.stdin is None
assert asyncssh_transport.stdout is None
def test_close_catch_brokenpipe(monkeypatch, asyncssh_transport):
def _close(cls):
raise BrokenPipeError
monkeypatch.setattr(
"asyncssh.connection.SSHClientConnection.close",
_close,
)
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
asyncssh_transport.close()
assert asyncssh_transport.session is None
assert asyncssh_transport.stdin is None
assert asyncssh_transport.stdout is None
def test_isalive_no_session(asyncssh_transport):
assert asyncssh_transport.isalive() is False
def test_isalive(asyncssh_transport):
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
# lie and tell asyncssh auth is done
asyncssh_transport.session._auth_complete = True
# also have to lie and create a transport and have it return False when is_closing is called
asyncssh_transport.session._transport = DumbContainer()
asyncssh_transport.session._transport.is_closing = lambda: False
assert asyncssh_transport.isalive() is True
def test_isalive_attribute_error(asyncssh_transport):
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
# lie and tell asyncssh auth is done
asyncssh_transport.session._auth_complete = True
assert asyncssh_transport.isalive() is False
async def test_read(monkeypatch, asyncssh_transport):
async def _read(cls, _):
return b"somebytes"
monkeypatch.setattr(
"asyncssh.stream.SSHReader.read",
_read,
)
# lie and pretend the session is already assigned/stdout is already a thing
asyncssh_transport.stdout = SSHReader("", "")
assert await asyncssh_transport.read() == b"somebytes"
async def test_read_exception_not_open(asyncssh_transport):
with pytest.raises(ScrapliConnectionNotOpened):
await asyncssh_transport.read()
async def test_read_exception_timeout(monkeypatch, asyncssh_transport):
async def _read(cls, _):
await asyncio.sleep(0.5)
monkeypatch.setattr(
"asyncssh.stream.SSHReader.read",
_read,
)
# lie and pretend the session is already assigned/stdout is already a thing
asyncssh_transport.stdout = SSHReader("", "")
asyncssh_transport._base_transport_args.timeout_transport = 0.1
with pytest.raises(ScrapliTimeout):
await asyncssh_transport.read()
def test_write(asyncssh_transport):
asyncssh_transport.stdin = BytesIO()
asyncssh_transport.write(b"blah")
asyncssh_transport.stdin.seek(0)
assert asyncssh_transport.stdin.read() == b"blah"
def test_write_exception(asyncssh_transport):
with pytest.raises(ScrapliConnectionNotOpened):
asyncssh_transport.write("blah")
| import asyncio
from io import BytesIO
import pytest
from asyncssh.connection import SSHClientConnection
from asyncssh.stream import SSHReader
from scrapli.exceptions import ScrapliConnectionNotOpened, ScrapliTimeout
class DumbContainer:
def __init__(self):
self.preferred_auth = ()
def __getattr__(self, item):
# options has a billion attributes, just return None, doesnt matter for this test
return None
def test_close(monkeypatch, asyncssh_transport):
def _close(cls):
pass
monkeypatch.setattr(
"asyncssh.connection.SSHClientConnection.close",
_close,
)
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
asyncssh_transport.close()
assert asyncssh_transport.session is None
assert asyncssh_transport.stdin is None
assert asyncssh_transport.stdout is None
def test_close_catch_brokenpipe(monkeypatch, asyncssh_transport):
def _close(cls):
raise BrokenPipeError
monkeypatch.setattr(
"asyncssh.connection.SSHClientConnection.close",
_close,
)
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
asyncssh_transport.close()
assert asyncssh_transport.session is None
assert asyncssh_transport.stdin is None
assert asyncssh_transport.stdout is None
def test_isalive_no_session(asyncssh_transport):
assert asyncssh_transport.isalive() is False
def test_isalive(asyncssh_transport):
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
# lie and tell asyncssh auth is done
asyncssh_transport.session._auth_complete = True
# also have to lie and create a transport and have it return False when is_closing is called
asyncssh_transport.session._transport = DumbContainer()
asyncssh_transport.session._transport.is_closing = lambda: False
assert asyncssh_transport.isalive() is True
def test_isalive_attribute_error(asyncssh_transport):
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
# lie and tell asyncssh auth is done
asyncssh_transport.session._auth_complete = True
assert asyncssh_transport.isalive() is False
async def test_read(monkeypatch, asyncssh_transport):
async def _read(cls, _):
return b"somebytes"
monkeypatch.setattr(
"asyncssh.stream.SSHReader.read",
_read,
)
# lie and pretend the session is already assigned/stdout is already a thing
asyncssh_transport.stdout = SSHReader("", "")
assert await asyncssh_transport.read() == b"somebytes"
async def test_read_exception_not_open(asyncssh_transport):
with pytest.raises(ScrapliConnectionNotOpened):
await asyncssh_transport.read()
async def test_read_exception_timeout(monkeypatch, asyncssh_transport):
async def _read(cls, _):
await asyncio.sleep(0.5)
monkeypatch.setattr(
"asyncssh.stream.SSHReader.read",
_read,
)
# lie and pretend the session is already assigned/stdout is already a thing
asyncssh_transport.stdout = SSHReader("", "")
asyncssh_transport._base_transport_args.timeout_transport = 0.1
with pytest.raises(ScrapliTimeout):
await asyncssh_transport.read()
def test_write(asyncssh_transport):
asyncssh_transport.stdin = BytesIO()
asyncssh_transport.write(b"blah")
asyncssh_transport.stdin.seek(0)
assert asyncssh_transport.stdin.read() == b"blah"
def test_write_exception(asyncssh_transport):
with pytest.raises(ScrapliConnectionNotOpened):
asyncssh_transport.write("blah")
| en | 0.940067 | # options has a billion attributes, just return None, doesnt matter for this test # lie and pretend the session is already assigned # lie and pretend the session is already assigned # lie and pretend the session is already assigned # lie and tell asyncssh auth is done # also have to lie and create a transport and have it return False when is_closing is called # lie and pretend the session is already assigned # lie and tell asyncssh auth is done # lie and pretend the session is already assigned/stdout is already a thing # lie and pretend the session is already assigned/stdout is already a thing | 1.981328 | 2 |
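The timeout test above works by patching the reader so it sleeps past timeout_transport. The same idea in a framework-agnostic form is sketched below; read_with_timeout is a hypothetical helper, not part of scrapli, and the bare async test assumes pytest-asyncio in auto mode, as the file's own async tests imply.

import asyncio

import pytest

async def read_with_timeout(reader, timeout: float) -> bytes:
    # Hypothetical helper: bound any awaitable read with a hard timeout.
    return await asyncio.wait_for(reader(), timeout=timeout)

async def test_read_times_out():
    async def slow_reader() -> bytes:
        await asyncio.sleep(0.5)
        return b"too late"

    with pytest.raises(asyncio.TimeoutError):
        await read_with_timeout(slow_reader, timeout=0.1)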
apps/ignite/views.py | Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | 2 | 8362 | <filename>apps/ignite/views.py
from django.shortcuts import get_object_or_404
import jingo
import waffle
from django.contrib.auth.models import User
from challenges.models import Submission, Category
from projects.models import Project
from blogs.models import BlogEntry
from events.models import Event
def splash(request, project, slug, template_name='ignite/splash.html'):
"""Show an individual project challenge."""
project = get_object_or_404(Project, slug=project)
challenge = get_object_or_404(project.challenge_set, slug=slug)
num_blogs = 3
# have we announced the winners yet - switch template
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/homepage-winners.html'
num_blogs = 5
blogs = BlogEntry.objects.filter(
page='splash'
).order_by("-updated",)[:num_blogs]
# if the dev challenge is open we want to only show dev entries
if request.development.is_open:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Development")
.order_by("?"))
num_entries = len(entries)
entries_from = 'apps'
if num_entries < 5:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
else:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
event_list = Event.objects.get_featured()[:5]
return jingo.render(request, template_name, {
'challenge': challenge,
'project': project,
'phases': list(enumerate(challenge.phases.all(), start=1)),
'entries': entries[:5],
'categories': Category.objects.all(),
'blogs': blogs,
'event_list': event_list,
'entries_from': entries_from,
})
def about(request, project, slug, template_name='ignite/about.html'):
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/about-winners.html'
return jingo.render(request, template_name)
def judges(request, project, slug, template_name='challenges/all_judges.html'):
""" List all judges we have in the system """
profiles = []
for judge in User.objects.filter(groups__name='Judges'):
profile = judge.get_profile()
# we only want to show featured profiles
if profile.featured == True:
profiles.append(profile)
return jingo.render(request, 'ignite/judges.html', {
'profiles': profiles
})
def terms(request, project, slug, template_name='static/terms_conditions.html'):
return jingo.render(request, template_name, {})
def terms_development(request, project, slug, template_name='static/terms_conditions_development.html'):
return jingo.render(request, template_name, {})
def fail(request, template_name='404.html'):
return jingo.render(request, template_name, {}, status=404)
def app_fail(request, template_name='500.html'):
return jingo.render(request, template_name, {}, status=500)
def action_unavailable_response(request, message=None,
template_name="action_unavailable.html"):
"""Generic page for unavailable actions"""
context = {'message': message}
return jingo.render(request, template_name, context, status=403)
| <filename>apps/ignite/views.py
from django.shortcuts import get_object_or_404
import jingo
import waffle
from django.contrib.auth.models import User
from challenges.models import Submission, Category
from projects.models import Project
from blogs.models import BlogEntry
from events.models import Event
def splash(request, project, slug, template_name='ignite/splash.html'):
"""Show an individual project challenge."""
project = get_object_or_404(Project, slug=project)
challenge = get_object_or_404(project.challenge_set, slug=slug)
num_blogs = 3
# have we announced the winners yet - switch template
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/homepage-winners.html'
num_blogs = 5
blogs = BlogEntry.objects.filter(
page='splash'
).order_by("-updated",)[:num_blogs]
# if the dev challenge is open we want to only show dev entries
if request.development.is_open:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Development")
.order_by("?"))
num_entries = len(entries)
entries_from = 'apps'
if num_entries < 5:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
else:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
event_list = Event.objects.get_featured()[:5]
return jingo.render(request, template_name, {
'challenge': challenge,
'project': project,
'phases': list(enumerate(challenge.phases.all(), start=1)),
'entries': entries[:5],
'categories': Category.objects.all(),
'blogs': blogs,
'event_list': event_list,
'entries_from': entries_from,
})
def about(request, project, slug, template_name='ignite/about.html'):
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/about-winners.html'
return jingo.render(request, template_name)
def judges(request, project, slug, template_name='challenges/all_judges.html'):
""" List all judges we have in the system """
profiles = []
for judge in User.objects.filter(groups__name='Judges'):
profile = judge.get_profile()
# we only want to show featured profiles
if profile.featured == True:
profiles.append(profile)
return jingo.render(request, 'ignite/judges.html', {
'profiles': profiles
})
def terms(request, project, slug, template_name='static/terms_conditions.html'):
return jingo.render(request, template_name, {})
def terms_development(request, project, slug, template_name='static/terms_conditions_development.html'):
return jingo.render(request, template_name, {})
def fail(request, template_name='404.html'):
return jingo.render(request, template_name, {}, status=404)
def app_fail(request, template_name='500.html'):
return jingo.render(request, template_name, {}, status=500)
def action_unavailable_response(request, message=None,
template_name="action_unavailable.html"):
"""Generic page for unavailable actions"""
context = {'message': message}
return jingo.render(request, template_name, context, status=403)
| en | 0.910673 | Show an individual project challenge. # have we announced the winners yet - switch template # if the dev challenge is open we want to only show dev entries List all judges we have in the system # we only want to show featured profiles Generic page for unavailable actions | 1.904672 | 2 |
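The only branching of note in the splash view above is the entry-selection fallback: show Development-phase submissions once at least five exist, otherwise fall back to Ideation entries. The same rule distilled into plain Python (an illustrative stand-in, not the Django code itself):

def pick_entries(dev_entries, ideation_entries, minimum=5):
    # Prefer development submissions once enough of them exist; otherwise show ideas.
    if len(dev_entries) >= minimum:
        return dev_entries, "apps"
    return ideation_entries, "ideas"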
dataPresenter.py | thebouv/IUS-Hacktoberfest | 3 | 8363 | from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dataProcessor import parseLabels, parseLangs
years = parseLabels()
langs = parseLangs()
#make the plotly results
fig = make_subplots(
rows=1, cols=2,
specs=[[{"type": "xy"}, {"type": "domain"}]],
)
fig.add_trace(go.Bar(y = list(langs.values()), x = list(langs.keys()), showlegend=False),
row=1, col=1)
fig.add_trace(go.Pie(values = list(years.values()), labels = list(years.keys())),
row=1, col=2)
fig.update_layout(height=600)
pio.write_html(fig, 'index.html', auto_open=True)
| from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dataProcessor import parseLabels, parseLangs
years = parseLabels()
langs = parseLangs()
#make the plotly results
fig = make_subplots(
rows=1, cols=2,
specs=[[{"type": "xy"}, {"type": "domain"}]],
)
fig.add_trace(go.Bar(y = list(langs.values()), x = list(langs.keys()), showlegend=False),
row=1, col=1)
fig.add_trace(go.Pie(values = list(years.values()), labels = list(years.keys())),
row=1, col=2)
fig.update_layout(height=600)
pio.write_html(fig, 'index.html', auto_open=True)
| en | 0.915831 | #make the plotly results | 2.750517 | 3 |
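dataPresenter.py assumes parseLabels() and parseLangs() each return a plain dict mapping label to count (it calls .keys() and .values() on both). Hypothetical stand-ins such as the ones below make the plotting code runnable in isolation; the values are invented for illustration.

def parseLabels():
    # Stand-in for dataProcessor.parseLabels: year -> contribution count.
    return {"2018": 12, "2019": 30, "2020": 18}

def parseLangs():
    # Stand-in for dataProcessor.parseLangs: language -> repository count.
    return {"Python": 25, "JavaScript": 20, "Go": 15}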
bdlb/diabetic_retinopathy_diagnosis/benchmark.py | Sairam954/bdl-benchmarks | 666 | 8364 | # Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diabetic retinopathy diagnosis BDL Benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Text
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
from ..core import transforms
from ..core.benchmark import Benchmark
from ..core.benchmark import BenchmarkInfo
from ..core.benchmark import DataSplits
from ..core.constants import DATA_DIR
from ..core.levels import Level
tfk = tf.keras
_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR = os.path.join(
DATA_DIR, "downloads", "manual", "diabetic_retinopathy_diagnosis")
class DiabeticRetinopathyDiagnosisBecnhmark(Benchmark):
"""Diabetic retinopathy diagnosis benchmark class."""
def __init__(
self,
level: Union[Text, Level],
batch_size: int = 64,
data_dir: Optional[Text] = None,
download_and_prepare: bool = False,
):
"""Constructs a benchmark object.
Args:
level: `Level` or `str, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
"""
self.__level = level if isinstance(level, Level) else Level.from_str(level)
try:
self.__ds = self.load(level=level,
batch_size=batch_size,
data_dir=data_dir or DATA_DIR)
except AssertionError:
if not download_and_prepare:
raise
else:
logging.info(
"Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()`"
" is now running...")
self.download_and_prepare()
@classmethod
def evaluate(
cls,
estimator: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
dataset: tf.data.Dataset,
output_dir: Optional[Text] = None,
name: Optional[Text] = None,
) -> Dict[Text, float]:
"""Evaluates an `estimator` on the `mode` benchmark dataset.
Args:
estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation
function, which returns `mean_x` and predictive `uncertainty_x`.
dataset: `tf.data.Dataset`, on which dataset to performance evaluation.
output_dir: (optional) `str`, directory to save figures.
name: (optional) `str`, the name of the method.
"""
import inspect
import tqdm
import tensorflow_datasets as tfds
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Containers used for caching performance evaluation
y_true = list()
y_pred = list()
y_uncertainty = list()
# Convert to NumPy iterator if necessary
ds = dataset if inspect.isgenerator(dataset) else tfds.as_numpy(dataset)
for x, y in tqdm.tqdm(ds):
# Sample from probabilistic model
mean, uncertainty = estimator(x)
# Cache predictions
y_true.append(y)
y_pred.append(mean)
y_uncertainty.append(uncertainty)
# Use vectorized NumPy containers
y_true = np.concatenate(y_true).flatten()
y_pred = np.concatenate(y_pred).flatten()
y_uncertainty = np.concatenate(y_uncertainty).flatten()
fractions = np.asarray([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Metrics for evaluation
metrics = zip(["accuracy", "auc"], cls.metrics())
return {
metric: cls._evaluate_metric(
y_true,
y_pred,
y_uncertainty,
fractions,
lambda y_true, y_pred: metric_fn(y_true, y_pred).numpy(),
name,
) for (metric, metric_fn) in metrics
}
@staticmethod
def _evaluate_metric(
y_true: np.ndarray,
y_pred: np.ndarray,
y_uncertainty: np.ndarray,
fractions: Sequence[float],
metric_fn: Callable[[np.ndarray, np.ndarray], float],
name=None,
) -> pd.DataFrame:
"""Evaluate model predictive distribution on `metric_fn` at data retain
`fractions`.
Args:
y_true: `numpy.ndarray`, the ground truth labels, with shape [N].
y_pred: `numpy.ndarray`, the model predictions, with shape [N].
y_uncertainty: `numpy.ndarray`, the model uncertainties,
with shape [N].
fractions: `iterable`, the percentages of data to retain for
calculating `metric_fn`.
metric_fn: `lambda(y_true, y_pred) -> float`, a metric
function that provides a score given ground truths
and predictions.
name: (optional) `str`, the name of the method.
Returns:
A `pandas.DataFrame` with columns ["retained_data", "mean", "std"],
that summarizes the scores at different data retained fractions.
"""
N = y_true.shape[0]
# Sorts indexes by ascending uncertainty
I_uncertainties = np.argsort(y_uncertainty)
# Score containers
mean = np.empty_like(fractions)
# TODO(filangel): do bootstrap sampling and estimate standard error
std = np.zeros_like(fractions)
for i, frac in enumerate(fractions):
# Keep only the %-frac of lowest uncertainties
I = np.zeros(N, dtype=bool)
I[I_uncertainties[:int(N * frac)]] = True
mean[i] = metric_fn(y_true[I], y_pred[I])
# Store
df = pd.DataFrame(dict(retained_data=fractions, mean=mean, std=std))
df.name = name
return df
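  # Worked example (illustrative): with fractions=[0.5, 1.0] and N=100 predictions, the 50
  # lowest-uncertainty points are scored first, then all 100; a well-calibrated model should
  # score at least as well on the retained 50% as on the full set.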
@property
def datasets(self) -> tf.data.Dataset:
"""Pointer to the processed datasets."""
return self.__ds
@property
def info(self) -> BenchmarkInfo:
"""Text description of the benchmark."""
return BenchmarkInfo(description="", urls="", setup="", citation="")
@property
def level(self) -> Level:
"""The downstream task level."""
return self.__level
@staticmethod
def loss() -> tfk.losses.Loss:
"""Loss used for training binary classifiers."""
return tfk.losses.BinaryCrossentropy()
@staticmethod
def metrics() -> tfk.metrics.Metric:
"""Evaluation metrics used for monitoring training."""
return [tfk.metrics.BinaryAccuracy(), tfk.metrics.AUC()]
@staticmethod
def class_weight() -> Sequence[float]:
"""Class weights used for rebalancing the dataset, by skewing the `loss`
accordingly."""
return [1.0, 4.0]
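  # Note: the 1:4 weighting upweights the positive (disease) class to compensate for label
  # imbalance, so the binary cross-entropy above is skewed accordingly.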
@classmethod
def load(
cls,
level: Union[Text, Level] = "realworld",
batch_size: int = 64,
data_dir: Optional[Text] = None,
as_numpy: bool = False,
) -> DataSplits:
"""Loads the datasets for the benchmark.
Args:
level: `Level` or `str, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
as_numpy: (optional) `bool`, if True returns python generators
with `numpy.ndarray` outputs.
Returns:
A namedtuple with properties:
* train: `tf.data.Dataset`, train dataset.
* validation: `tf.data.Dataset`, validation dataset.
* test: `tf.data.Dataset`, test dataset.
"""
import tensorflow_datasets as tfds
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Fetch datasets
try:
ds_train, ds_validation, ds_test = DiabeticRetinopathyDiagnosis(
data_dir=data_dir or DATA_DIR,
config=level).as_dataset(split=["train", "validation", "test"],
shuffle_files=True,
batch_size=batch_size)
except AssertionError as ae:
raise AssertionError(
str(ae) +
" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"
" first and then retry.")
# Parse task level
level = level if isinstance(level, Level) else Level.from_str(level)
# Dataset tranformations
transforms_train, transforms_eval = cls._preprocessors()
# Apply transformations
ds_train = ds_train.map(transforms_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.map(
transforms_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(transforms_eval,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetches datasets to memory
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
if as_numpy:
# Convert to NumPy iterators
ds_train = tfds.as_numpy(ds_train)
ds_validation = tfds.as_numpy(ds_validation)
ds_test = tfds.as_numpy(ds_test)
return DataSplits(ds_train, ds_validation, ds_test)
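  # Typical use (illustrative):
  #   splits = DiabeticRetinopathyDiagnosisBecnhmark.load(level="medium", batch_size=32)
  #   for images, labels in splits.train.take(1): ...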
@classmethod
def download_and_prepare(cls, levels=None) -> None:
"""Downloads dataset from Kaggle, extracts zip files and processes it using
`tensorflow_datasets`.
Args:
levels: (optional) `iterable` of `str`, specifies which
levels from {'medium', 'realworld'} to prepare,
if None it prepares all the levels.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
# Disable GPU for data download, extraction and preparation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cls._download()
# cls._extract()
#cls._prepare(levels)
@staticmethod
def _download() -> None:
"""Downloads data from Kaggle using `tensorflow_datasets`.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
import subprocess as sp
import tensorflow_datasets as tfds
# Append `/home/$USER/.local/bin` to path
os.environ["PATH"] += ":/home/{}/.local/bin/".format(os.environ["USER"])
# Download all files from Kaggle
drd = tfds.download.kaggle.KaggleCompetitionDownloader(
"diabetic-retinopathy-detection")
try:
for dfile in drd.competition_files:
drd.download_file(dfile,
output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
except sp.CalledProcessError as cpe:
raise OSError(
str(cpe) + "." +
" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"
" https://www.kaggle.com/<username>/account -> 'Create New API Key'."
" Also accept the dataset license by going to"
" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"
" and look for the button 'I Understand and Accept' (make sure when reloading the"
" page that the button does not pop up again).")
@staticmethod
def _extract() -> None:
"""Extracts zip files downloaded from Kaggle."""
import glob
import tqdm
import zipfile
import tempfile
# Extract train and test original images
for split in ["train", "test"]:
# Extract "<split>.zip.00*"" files to "<split>"
with tempfile.NamedTemporaryFile() as tmp:
# Concatenate "<split>.zip.00*" to "<split>.zip"
for fname in tqdm.tqdm(
sorted(
glob.glob(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{split}.zip.00*".format(split=split))))):
# Unzip "<split>.zip" to "<split>"
with open(fname, "rb") as ztmp:
tmp.write(ztmp.read())
with zipfile.ZipFile(tmp) as zfile:
for image in tqdm.tqdm(iterable=zfile.namelist(),
total=len(zfile.namelist())):
zfile.extract(member=image,
path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
# Delete "<split>.zip.00*" files
for splitzip in os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR):
if "{split}.zip.00".format(split=split) in splitzip:
os.remove(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip))
# Extract "sample.zip", "trainLabels.csv.zip"
for fname in ["sample", "trainLabels.csv"]:
zfname = os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{fname}.zip".format(fname=fname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
os.remove(zfname)
@staticmethod
def _prepare(levels=None) -> None:
"""Generates the TFRecord objects for medium and realworld experiments."""
import multiprocessing
from absl import logging
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Hangle each level individually
for level in levels or ["medium", "realworld"]:
dtask = DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR, config=level)
logging.debug("=== Preparing TFRecords for {} ===".format(level))
dtask.download_and_prepare()
@classmethod
def _preprocessors(cls) -> Tuple[transforms.Transform, transforms.Transform]:
"""Applies transformations to the raw data."""
import tensorflow_datasets as tfds
# Transformation hyperparameters
mean = np.asarray([0.42606387, 0.29752496, 0.21309826])
stddev = np.asarray([0.27662534, 0.20280295, 0.1687619])
class Parse(transforms.Transform):
"""Parses datapoints from raw `tf.data.Dataset`."""
def __call__(self, x, y=None):
"""Returns `as_supervised` tuple."""
return x["image"], x["label"]
class CastX(transforms.Transform):
"""Casts image to `dtype`."""
def __init__(self, dtype):
"""Constructs a type caster."""
self.dtype = dtype
def __call__(self, x, y):
"""Returns casted image (to `dtype`) and its (unchanged) label as
tuple."""
return tf.cast(x, self.dtype), y
class To01X(transforms.Transform):
"""Rescales image to [min, max]=[0, 1]."""
def __call__(self, x, y):
"""Returns rescaled image and its (unchanged) label as tuple."""
return x / 255.0, y
# Get augmentation schemes
[augmentation_config,
no_augmentation_config] = cls._ImageDataGenerator_config()
# Transformations for train dataset
transforms_train = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
# TODO(filangel): hangle batch with ImageDataGenerator
# transforms.RandomAugment(**augmentation_config),
])
# Transformations for validation/test dataset
transforms_eval = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
# TODO(filangel): hangle batch with ImageDataGenerator
# transforms.RandomAugment(**no_augmentation_config),
])
return transforms_train, transforms_eval
@staticmethod
def _ImageDataGenerator_config():
"""Returns the configs for the
`tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the
random augmentation of the dataset, following the implementation of
https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93
e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""
augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=180.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.,
zoom_range=0.10,
channel_shift_range=0.,
fill_mode="constant",
cval=0.,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
)
no_augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
shear_range=0.,
zoom_range=0.0,
channel_shift_range=0.,
fill_mode="nearest",
cval=0.,
horizontal_flip=False,
vertical_flip=False,
data_format="channels_last",
)
return augmentation_config, no_augmentation_config
| # Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diabetic retinopathy diagnosis BDL Benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Text
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
from ..core import transforms
from ..core.benchmark import Benchmark
from ..core.benchmark import BenchmarkInfo
from ..core.benchmark import DataSplits
from ..core.constants import DATA_DIR
from ..core.levels import Level
tfk = tf.keras
_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR = os.path.join(
DATA_DIR, "downloads", "manual", "diabetic_retinopathy_diagnosis")
class DiabeticRetinopathyDiagnosisBecnhmark(Benchmark):
"""Diabetic retinopathy diagnosis benchmark class."""
def __init__(
self,
level: Union[Text, Level],
batch_size: int = 64,
data_dir: Optional[Text] = None,
download_and_prepare: bool = False,
):
"""Constructs a benchmark object.
Args:
level: `Level` or `str, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
"""
self.__level = level if isinstance(level, Level) else Level.from_str(level)
try:
self.__ds = self.load(level=level,
batch_size=batch_size,
data_dir=data_dir or DATA_DIR)
except AssertionError:
if not download_and_prepare:
raise
else:
logging.info(
"Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()`"
" is now running...")
self.download_and_prepare()
@classmethod
def evaluate(
cls,
estimator: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
dataset: tf.data.Dataset,
output_dir: Optional[Text] = None,
name: Optional[Text] = None,
) -> Dict[Text, float]:
"""Evaluates an `estimator` on the `mode` benchmark dataset.
Args:
estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation
function, which returns `mean_x` and predictive `uncertainty_x`.
dataset: `tf.data.Dataset`, on which dataset to performance evaluation.
output_dir: (optional) `str`, directory to save figures.
name: (optional) `str`, the name of the method.
"""
import inspect
import tqdm
import tensorflow_datasets as tfds
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Containers used for caching performance evaluation
y_true = list()
y_pred = list()
y_uncertainty = list()
# Convert to NumPy iterator if necessary
ds = dataset if inspect.isgenerator(dataset) else tfds.as_numpy(dataset)
for x, y in tqdm.tqdm(ds):
# Sample from probabilistic model
mean, uncertainty = estimator(x)
# Cache predictions
y_true.append(y)
y_pred.append(mean)
y_uncertainty.append(uncertainty)
# Use vectorized NumPy containers
y_true = np.concatenate(y_true).flatten()
y_pred = np.concatenate(y_pred).flatten()
y_uncertainty = np.concatenate(y_uncertainty).flatten()
fractions = np.asarray([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Metrics for evaluation
metrics = zip(["accuracy", "auc"], cls.metrics())
return {
metric: cls._evaluate_metric(
y_true,
y_pred,
y_uncertainty,
fractions,
lambda y_true, y_pred: metric_fn(y_true, y_pred).numpy(),
name,
) for (metric, metric_fn) in metrics
}
@staticmethod
def _evaluate_metric(
y_true: np.ndarray,
y_pred: np.ndarray,
y_uncertainty: np.ndarray,
fractions: Sequence[float],
metric_fn: Callable[[np.ndarray, np.ndarray], float],
name=None,
) -> pd.DataFrame:
"""Evaluate model predictive distribution on `metric_fn` at data retain
`fractions`.
Args:
y_true: `numpy.ndarray`, the ground truth labels, with shape [N].
y_pred: `numpy.ndarray`, the model predictions, with shape [N].
y_uncertainty: `numpy.ndarray`, the model uncertainties,
with shape [N].
fractions: `iterable`, the percentages of data to retain for
calculating `metric_fn`.
metric_fn: `lambda(y_true, y_pred) -> float`, a metric
function that provides a score given ground truths
and predictions.
name: (optional) `str`, the name of the method.
Returns:
A `pandas.DataFrame` with columns ["retained_data", "mean", "std"],
that summarizes the scores at different data retained fractions.
"""
N = y_true.shape[0]
# Sorts indexes by ascending uncertainty
I_uncertainties = np.argsort(y_uncertainty)
# Score containers
mean = np.empty_like(fractions)
# TODO(filangel): do bootstrap sampling and estimate standard error
std = np.zeros_like(fractions)
for i, frac in enumerate(fractions):
# Keep only the %-frac of lowest uncertainties
I = np.zeros(N, dtype=bool)
I[I_uncertainties[:int(N * frac)]] = True
mean[i] = metric_fn(y_true[I], y_pred[I])
# Store
df = pd.DataFrame(dict(retained_data=fractions, mean=mean, std=std))
df.name = name
return df
@property
def datasets(self) -> tf.data.Dataset:
"""Pointer to the processed datasets."""
return self.__ds
@property
def info(self) -> BenchmarkInfo:
"""Text description of the benchmark."""
return BenchmarkInfo(description="", urls="", setup="", citation="")
@property
def level(self) -> Level:
"""The downstream task level."""
return self.__level
@staticmethod
def loss() -> tfk.losses.Loss:
"""Loss used for training binary classifiers."""
return tfk.losses.BinaryCrossentropy()
@staticmethod
def metrics() -> tfk.metrics.Metric:
"""Evaluation metrics used for monitoring training."""
return [tfk.metrics.BinaryAccuracy(), tfk.metrics.AUC()]
@staticmethod
def class_weight() -> Sequence[float]:
"""Class weights used for rebalancing the dataset, by skewing the `loss`
accordingly."""
return [1.0, 4.0]
@classmethod
def load(
cls,
level: Union[Text, Level] = "realworld",
batch_size: int = 64,
data_dir: Optional[Text] = None,
as_numpy: bool = False,
) -> DataSplits:
"""Loads the datasets for the benchmark.
Args:
level: `Level` or `str, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
as_numpy: (optional) `bool`, if True returns python generators
with `numpy.ndarray` outputs.
Returns:
A namedtuple with properties:
* train: `tf.data.Dataset`, train dataset.
* validation: `tf.data.Dataset`, validation dataset.
* test: `tf.data.Dataset`, test dataset.
"""
import tensorflow_datasets as tfds
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Fetch datasets
try:
ds_train, ds_validation, ds_test = DiabeticRetinopathyDiagnosis(
data_dir=data_dir or DATA_DIR,
config=level).as_dataset(split=["train", "validation", "test"],
shuffle_files=True,
batch_size=batch_size)
except AssertionError as ae:
raise AssertionError(
str(ae) +
" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"
" first and then retry.")
# Parse task level
level = level if isinstance(level, Level) else Level.from_str(level)
# Dataset tranformations
transforms_train, transforms_eval = cls._preprocessors()
# Apply transformations
ds_train = ds_train.map(transforms_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.map(
transforms_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(transforms_eval,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetches datasets to memory
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
if as_numpy:
# Convert to NumPy iterators
ds_train = tfds.as_numpy(ds_train)
ds_validation = tfds.as_numpy(ds_validation)
ds_test = tfds.as_numpy(ds_test)
return DataSplits(ds_train, ds_validation, ds_test)
@classmethod
def download_and_prepare(cls, levels=None) -> None:
"""Downloads dataset from Kaggle, extracts zip files and processes it using
`tensorflow_datasets`.
Args:
levels: (optional) `iterable` of `str`, specifies which
levels from {'medium', 'realworld'} to prepare,
if None it prepares all the levels.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
# Disable GPU for data download, extraction and preparation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cls._download()
# cls._extract()
#cls._prepare(levels)
@staticmethod
def _download() -> None:
"""Downloads data from Kaggle using `tensorflow_datasets`.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
import subprocess as sp
import tensorflow_datasets as tfds
# Append `/home/$USER/.local/bin` to path
os.environ["PATH"] += ":/home/{}/.local/bin/".format(os.environ["USER"])
# Download all files from Kaggle
drd = tfds.download.kaggle.KaggleCompetitionDownloader(
"diabetic-retinopathy-detection")
try:
for dfile in drd.competition_files:
drd.download_file(dfile,
output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
except sp.CalledProcessError as cpe:
raise OSError(
str(cpe) + "." +
" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"
" https://www.kaggle.com/<username>/account -> 'Create New API Key'."
" Also accept the dataset license by going to"
" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"
" and look for the button 'I Understand and Accept' (make sure when reloading the"
" page that the button does not pop up again).")
@staticmethod
def _extract() -> None:
"""Extracts zip files downloaded from Kaggle."""
import glob
import tqdm
import zipfile
import tempfile
# Extract train and test original images
for split in ["train", "test"]:
# Extract "<split>.zip.00*"" files to "<split>"
with tempfile.NamedTemporaryFile() as tmp:
# Concatenate "<split>.zip.00*" to "<split>.zip"
for fname in tqdm.tqdm(
sorted(
glob.glob(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{split}.zip.00*".format(split=split))))):
# Unzip "<split>.zip" to "<split>"
with open(fname, "rb") as ztmp:
tmp.write(ztmp.read())
with zipfile.ZipFile(tmp) as zfile:
for image in tqdm.tqdm(iterable=zfile.namelist(),
total=len(zfile.namelist())):
zfile.extract(member=image,
path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
# Delete "<split>.zip.00*" files
for splitzip in os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR):
if "{split}.zip.00".format(split=split) in splitzip:
os.remove(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip))
# Extract "sample.zip", "trainLabels.csv.zip"
for fname in ["sample", "trainLabels.csv"]:
zfname = os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{fname}.zip".format(fname=fname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
os.remove(zfname)
@staticmethod
def _prepare(levels=None) -> None:
"""Generates the TFRecord objects for medium and realworld experiments."""
import multiprocessing
from absl import logging
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Hangle each level individually
for level in levels or ["medium", "realworld"]:
dtask = DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR, config=level)
logging.debug("=== Preparing TFRecords for {} ===".format(level))
dtask.download_and_prepare()
@classmethod
def _preprocessors(cls) -> Tuple[transforms.Transform, transforms.Transform]:
"""Applies transformations to the raw data."""
import tensorflow_datasets as tfds
# Transformation hyperparameters
mean = np.asarray([0.42606387, 0.29752496, 0.21309826])
stddev = np.asarray([0.27662534, 0.20280295, 0.1687619])
class Parse(transforms.Transform):
"""Parses datapoints from raw `tf.data.Dataset`."""
def __call__(self, x, y=None):
"""Returns `as_supervised` tuple."""
return x["image"], x["label"]
class CastX(transforms.Transform):
"""Casts image to `dtype`."""
def __init__(self, dtype):
"""Constructs a type caster."""
self.dtype = dtype
def __call__(self, x, y):
"""Returns casted image (to `dtype`) and its (unchanged) label as
tuple."""
return tf.cast(x, self.dtype), y
class To01X(transforms.Transform):
"""Rescales image to [min, max]=[0, 1]."""
def __call__(self, x, y):
"""Returns rescaled image and its (unchanged) label as tuple."""
return x / 255.0, y
# Get augmentation schemes
[augmentation_config,
no_augmentation_config] = cls._ImageDataGenerator_config()
# Transformations for train dataset
transforms_train = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**augmentation_config),
])
# Transformations for validation/test dataset
transforms_eval = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**no_augmentation_config),
])
return transforms_train, transforms_eval
@staticmethod
def _ImageDataGenerator_config():
"""Returns the configs for the
`tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the
random augmentation of the dataset, following the implementation of
https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93
e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""
augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=180.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.,
zoom_range=0.10,
channel_shift_range=0.,
fill_mode="constant",
cval=0.,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
)
no_augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
shear_range=0.,
zoom_range=0.0,
channel_shift_range=0.,
fill_mode="nearest",
cval=0.,
horizontal_flip=False,
vertical_flip=False,
data_format="channels_last",
)
return augmentation_config, no_augmentation_config
| en | 0.651296 | # Copyright 2019 BDL Benchmarks Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Diabetic retinopathy diagnosis BDL Benchmark. Diabetic retinopathy diagnosis benchmark class. Constructs a benchmark object. Args: level: `Level` or `str, downstream task level. batch_size: (optional) `int`, number of datapoints per mini-batch. data_dir: (optional) `str`, path to parent data directory. download_and_prepare: (optional) `bool`, if the data is not available it downloads and preprocesses it. Evaluates an `estimator` on the `mode` benchmark dataset. Args: estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation function, which returns `mean_x` and predictive `uncertainty_x`. dataset: `tf.data.Dataset`, on which dataset to performance evaluation. output_dir: (optional) `str`, directory to save figures. name: (optional) `str`, the name of the method. # Containers used for caching performance evaluation # Convert to NumPy iterator if necessary # Sample from probabilistic model # Cache predictions # Use vectorized NumPy containers # Metrics for evaluation Evaluate model predictive distribution on `metric_fn` at data retain `fractions`. Args: y_true: `numpy.ndarray`, the ground truth labels, with shape [N]. y_pred: `numpy.ndarray`, the model predictions, with shape [N]. y_uncertainty: `numpy.ndarray`, the model uncertainties, with shape [N]. fractions: `iterable`, the percentages of data to retain for calculating `metric_fn`. metric_fn: `lambda(y_true, y_pred) -> float`, a metric function that provides a score given ground truths and predictions. name: (optional) `str`, the name of the method. Returns: A `pandas.DataFrame` with columns ["retained_data", "mean", "std"], that summarizes the scores at different data retained fractions. # Sorts indexes by ascending uncertainty # Score containers # TODO(filangel): do bootstrap sampling and estimate standard error # Keep only the %-frac of lowest uncertainties # Store Pointer to the processed datasets. Text description of the benchmark. The downstream task level. Loss used for training binary classifiers. Evaluation metrics used for monitoring training. Class weights used for rebalancing the dataset, by skewing the `loss` accordingly. Loads the datasets for the benchmark. Args: level: `Level` or `str, downstream task level. batch_size: (optional) `int`, number of datapoints per mini-batch. data_dir: (optional) `str`, path to parent data directory. as_numpy: (optional) `bool`, if True returns python generators with `numpy.ndarray` outputs. Returns: A namedtuple with properties: * train: `tf.data.Dataset`, train dataset. * validation: `tf.data.Dataset`, validation dataset. * test: `tf.data.Dataset`, test dataset. 
# Fetch datasets # Parse task level # Dataset tranformations # Apply transformations # Prefetches datasets to memory # Convert to NumPy iterators Downloads dataset from Kaggle, extracts zip files and processes it using `tensorflow_datasets`. Args: levels: (optional) `iterable` of `str`, specifies which levels from {'medium', 'realworld'} to prepare, if None it prepares all the levels. Raises: OSError: if `~/.kaggle/kaggle.json` is not set up. # Disable GPU for data download, extraction and preparation # cls._extract() #cls._prepare(levels) Downloads data from Kaggle using `tensorflow_datasets`. Raises: OSError: if `~/.kaggle/kaggle.json` is not set up. # Append `/home/$USER/.local/bin` to path # Download all files from Kaggle Extracts zip files downloaded from Kaggle. # Extract train and test original images # Extract "<split>.zip.00*"" files to "<split>" # Concatenate "<split>.zip.00*" to "<split>.zip" # Unzip "<split>.zip" to "<split>" # Delete "<split>.zip.00*" files # Extract "sample.zip", "trainLabels.csv.zip" Generates the TFRecord objects for medium and realworld experiments. # Hangle each level individually Applies transformations to the raw data. # Transformation hyperparameters Parses datapoints from raw `tf.data.Dataset`. Returns `as_supervised` tuple. Casts image to `dtype`. Constructs a type caster. Returns casted image (to `dtype`) and its (unchanged) label as tuple. Rescales image to [min, max]=[0, 1]. Returns rescaled image and its (unchanged) label as tuple. # Get augmentation schemes # Transformations for train dataset # TODO(filangel): hangle batch with ImageDataGenerator # transforms.RandomAugment(**augmentation_config), # Transformations for validation/test dataset # TODO(filangel): hangle batch with ImageDataGenerator # transforms.RandomAugment(**no_augmentation_config), Returns the configs for the `tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the random augmentation of the dataset, following the implementation of https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93 e3faa342f7d088e5/scripts/inspect_data_augmentation.py. | 1.848437 | 2 |
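The docstrings in the record above describe scoring a model on progressively larger fractions of the test data, keeping the lowest-uncertainty predictions first. A minimal illustrative sketch of that retained-data curve (not the benchmark's own implementation; the function name and sample values are made up here):

import numpy as np

def metric_at_retained_fractions(y_true, y_pred, y_uncertainty, fractions, metric_fn):
    # Sort so the most certain predictions come first.
    order = np.argsort(y_uncertainty)
    y_true, y_pred = y_true[order], y_pred[order]
    scores = []
    for frac in fractions:
        k = max(1, int(round(frac * len(y_true))))  # keep the frac most-confident points
        scores.append(metric_fn(y_true[:k], y_pred[:k]))
    return np.asarray(scores)

# Toy check: the wrong predictions carry the highest uncertainty, so the score
# only degrades once the full dataset is retained.
rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=100)
y_pred = y_true.copy()
y_pred[:20] = 1 - y_pred[:20]
y_unc = rng.random(100)
y_unc[:20] += 1.0
accuracy = lambda t, p: float((t == p).mean())
print(metric_at_retained_fractions(y_true, y_pred, y_unc, [0.5, 0.8, 1.0], accuracy))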
db/redis_db.py | Lifeistrange/WeiboSpider | 1 | 8365 |
# coding:utf-8
import datetime
import json
import re
import redis
from config.conf import get_redis_args
redis_args = get_redis_args()
class Cookies(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('cookies'))
rd_con_broker = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('broker'))
@classmethod
def store_cookies(cls, name, cookies):
pickled_cookies = json.dumps(
{'cookies': cookies, 'loginTime': datetime.datetime.now().timestamp()})
cls.rd_con.hset('account', name, pickled_cookies)
cls.rd_con.lpush('account_queue', name)
@classmethod
def fetch_cookies(cls):
for i in range(cls.rd_con.llen('account_queue')):
name = cls.rd_con.rpop('account_queue').decode('utf-8')
if name:
j_account = cls.rd_con.hget('account', name).decode('utf-8')
if j_account:
cls.rd_con.lpush('account_queue', name) # 当账号不存在时,这个name也会清除,并取下一个name
account = json.loads(j_account)
login_time = datetime.datetime.fromtimestamp(account['loginTime'])
if datetime.datetime.now() - login_time > datetime.timedelta(hours=20):
cls.rd_con.hdel('account', name)
continue # 丢弃这个过期账号,account_queue会在下次访问的时候被清除,这里不清除是因为分布式的关系
return name, account['cookies']
else:
return None
@classmethod
def delete_cookies(cls, name):
cls.rd_con.hdel('account', name)
return True
@classmethod
def check_login_task(cls):
if cls.rd_con_broker.llen('login_queue') > 0:
cls.rd_con_broker.delete('login_queue')
class Urls(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('urls'))
@classmethod
def store_crawl_url(cls, url, result):
cls.rd_con.set(url, result)
class IdNames(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('id_name'))
@classmethod
def store_id_name(cls, user_name, user_id):
cls.rd_con.set(user_name, user_id)
@classmethod
def fetch_uid_by_name(cls, user_name):
user_id = cls.rd_con.get(user_name)
if user_id:
return user_id.decode('utf-8')
return ''
| # coding:utf-8
import datetime
import json
import re
import redis
from config.conf import get_redis_args
redis_args = get_redis_args()
class Cookies(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('cookies'))
rd_con_broker = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('broker'))
@classmethod
def store_cookies(cls, name, cookies):
pickled_cookies = json.dumps(
{'cookies': cookies, 'loginTime': datetime.datetime.now().timestamp()})
cls.rd_con.hset('account', name, pickled_cookies)
cls.rd_con.lpush('account_queue', name)
@classmethod
def fetch_cookies(cls):
for i in range(cls.rd_con.llen('account_queue')):
name = cls.rd_con.rpop('account_queue').decode('utf-8')
if name:
j_account = cls.rd_con.hget('account', name).decode('utf-8')
if j_account:
cls.rd_con.lpush('account_queue', name) # 当账号不存在时,这个name也会清除,并取下一个name
account = json.loads(j_account)
login_time = datetime.datetime.fromtimestamp(account['loginTime'])
if datetime.datetime.now() - login_time > datetime.timedelta(hours=20):
cls.rd_con.hdel('account', name)
continue # 丢弃这个过期账号,account_queue会在下次访问的时候被清除,这里不清除是因为分布式的关系
return name, account['cookies']
else:
return None
@classmethod
def delete_cookies(cls, name):
cls.rd_con.hdel('account', name)
return True
@classmethod
def check_login_task(cls):
if cls.rd_con_broker.llen('login_queue') > 0:
cls.rd_con_broker.delete('login_queue')
class Urls(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('urls'))
@classmethod
def store_crawl_url(cls, url, result):
cls.rd_con.set(url, result)
class IdNames(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('id_name'))
@classmethod
def store_id_name(cls, user_name, user_id):
cls.rd_con.set(user_name, user_id)
@classmethod
def fetch_uid_by_name(cls, user_name):
user_id = cls.rd_con.get(user_name)
if user_id:
return user_id.decode('utf-8')
return '' | zh | 0.9377 | # coding:utf-8 # 当账号不存在时,这个name也会清除,并取下一个name # 丢弃这个过期账号,account_queue会在下次访问的时候被清除,这里不清除是因为分布式的关系 | 2.378766 | 2 |
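A short usage sketch for the Cookies helper in the record above. It assumes a reachable Redis instance, the project's config module, and that the module is importable as db.redis_db; the account name and cookie payload are placeholders:

from db.redis_db import Cookies

Cookies.store_cookies('weibo_account_1', {'SUB': 'xxx', 'SUBP': 'yyy'})

fetched = Cookies.fetch_cookies()
if fetched is not None:
    name, cookies = fetched
    print('using account', name, 'with cookies', cookies)
else:
    print('no usable account: queue empty or every cookie is older than 20 hours')

fetch_cookies() rotates account names through the Redis list, drops entries whose login time is more than 20 hours old, and returns None when nothing usable remains.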
vivisect/storage/mpfile.py | vEpiphyte/vivisect | 0 | 8366 | import base64
import logging
import msgpack
logger = logging.getLogger(__name__)
loadargs = {'use_list': False, 'raw': False}
if msgpack.version < (1, 0, 0):
loadargs['encoding'] = 'utf-8'
else:
loadargs['strict_map_key'] = False
VSIG = b'MSGVIV'.ljust(8, b'\x00')
def vivEventsAppendFile(filename, events):
with open(filename, 'ab') as f:
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspaceChanges(vw, filename):
events = vw.exportWorkspaceChanges()
vivEventsAppendFile(filename, events)
def vivEventsToFile(filename, events):
with open(filename, 'wb') as f:
msgpack.pack(VSIG, f, use_bin_type=False)
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspace(vw, filename):
events = vw.exportWorkspace()
vivEventsToFile(filename, events)
def vivEventsFromFile(filename):
events = []
with open(filename, 'rb') as f:
unpacker = msgpack.Unpacker(f, **loadargs)
siggy = next(unpacker)
if siggy.encode('utf-8') != VSIG:
logger.warning('Invalid file signature of %s', str(siggy))
return
for event in unpacker:
if event[0] == 20:
mape = base64.b64decode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
events.append(event)
return events
def loadWorkspace(vw, filename):
events = vivEventsFromFile(filename)
vw.importWorkspace(events)
| import base64
import logging
import msgpack
logger = logging.getLogger(__name__)
loadargs = {'use_list': False, 'raw': False}
if msgpack.version < (1, 0, 0):
loadargs['encoding'] = 'utf-8'
else:
loadargs['strict_map_key'] = False
VSIG = b'MSGVIV'.ljust(8, b'\x00')
def vivEventsAppendFile(filename, events):
with open(filename, 'ab') as f:
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspaceChanges(vw, filename):
events = vw.exportWorkspaceChanges()
vivEventsAppendFile(filename, events)
def vivEventsToFile(filename, events):
with open(filename, 'wb') as f:
msgpack.pack(VSIG, f, use_bin_type=False)
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspace(vw, filename):
events = vw.exportWorkspace()
vivEventsToFile(filename, events)
def vivEventsFromFile(filename):
events = []
with open(filename, 'rb') as f:
unpacker = msgpack.Unpacker(f, **loadargs)
siggy = next(unpacker)
if siggy.encode('utf-8') != VSIG:
logger.warning('Invalid file signature of %s', str(siggy))
return
for event in unpacker:
if event[0] == 20:
mape = base64.b64decode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
events.append(event)
return events
def loadWorkspace(vw, filename):
events = vivEventsFromFile(filename)
vw.importWorkspace(events)
| none | 1 | 2.140899 | 2 |
|
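A round-trip sketch for the event-file helpers in the mpfile record above. The module base64-encodes the fourth field of event code 20 (a bytes payload) before packing; the events below are made up, and the import path is assumed from the record's filename:

import os
import tempfile
from vivisect.storage import mpfile

events = [
    (3, ('example', 'meta', 1)),               # made-up ordinary event
    (20, (0x400000, 'map0', 7, b'\x90\x90')),  # made-up code-20 event carrying bytes
]
path = os.path.join(tempfile.mkdtemp(), 'example.viv')
mpfile.vivEventsToFile(path, events)
restored = mpfile.vivEventsFromFile(path)
assert restored[1][1][3] == b'\x90\x90'        # the bytes survive the base64 round trip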
pytest_pgsql/plugin.py | mathiasose/pytest-pgsql | 0 | 8367 |
"""This forms the core of the pytest plugin."""
import pytest
import testing.postgresql
from pytest_pgsql import database
from pytest_pgsql import ext
def pytest_addoption(parser):
"""Add configuration options for pytest_pgsql."""
parser.addoption(
'--pg-extensions', action='store', default='',
help="A comma-separated list of PostgreSQL extensions to install at "
"the beginning of the session for use by all tests. Example: "
"--pg-extensions=uuid-ossp,pg_tgrm,pgcrypto")
parser.addoption(
'--pg-work-mem', type=int, default=32,
help='Set the value of the `work_mem` setting, in megabytes. '
'`pytest_pgsql` defaults to 32. Adjusting this up or down can '
'help performance; see the Postgres documentation for more details.')
parser.addoption(
'--pg-conf-opt', action='append',
help='Add a key=value line that will be appended to postgresql.conf')
@pytest.fixture(scope='session')
def database_uri(request):
"""A fixture giving the connection URI of the session-wide test database."""
# Note: due to the nature of the variable configs, the command line options
# must be tested manually.
work_mem = request.config.getoption('--pg-work-mem')
if work_mem < 0: # pragma: no cover
pytest.exit('ERROR: --pg-work-mem value must be >= 0. Got: %d' % work_mem)
return
elif work_mem == 0: # pragma: no cover
# Disable memory tweak and use the server default.
work_mem_setting = ''
else:
# User wants to change the working memory setting.
work_mem_setting = '-c work_mem=%dMB ' % work_mem
conf_opts = request.config.getoption('--pg-conf-opt')
if conf_opts:
conf_opts_string = ' -c ' + ' -c '.join(conf_opts)
else:
conf_opts_string = ''
# pylint: disable=bad-continuation,deprecated-method
with testing.postgresql.Postgresql(
postgres_args='-c TimeZone=UTC '
'-c fsync=off '
'-c synchronous_commit=off '
'-c full_page_writes=off '
+ work_mem_setting +
'-c checkpoint_timeout=30min '
'-c bgwriter_delay=10000ms'
+ conf_opts_string) as pgdb:
yield pgdb.url()
#: A SQLAlchemy engine shared by the transacted and non-transacted database fixtures.
#:
#: .. seealso:: `pytest_pgsql.ext.create_engine_fixture`
# pylint: disable=invalid-name
pg_engine = ext.create_engine_fixture('pg_engine', scope='session')
# pylint: enable=invalid-name
@pytest.fixture(scope='session')
def database_snapshot(pg_engine):
"""Create one database snapshot for the session.
The database will be restored to this state after each test.
.. note ::
This is an implementation detail and should not be used directly except
by derived fixtures.
"""
return database.create_database_snapshot(pg_engine)
# pylint: disable=invalid-name
#: Create a test database instance and cleans up after each test finishes.
#:
#: You should prefer the `transacted_postgresql_db` fixture unless your test
#: cannot be run in a single transaction. The `transacted_postgresql_db` fixture
#: leads to faster tests since it doesn't tear down the entire database between
#: each test.
postgresql_db = \
database.PostgreSQLTestDB.create_fixture('postgresql_db')
#: Create a test database instance that rolls back the current transaction after
#: each test finishes, verifying its integrity before returning.
#:
#: Read the warning in the main documentation page before using this fixture.
transacted_postgresql_db = \
database.TransactedPostgreSQLTestDB.create_fixture('transacted_postgresql_db')
# pylint: enable=invalid-name
|
"""This forms the core of the pytest plugin."""
import pytest
import testing.postgresql
from pytest_pgsql import database
from pytest_pgsql import ext
def pytest_addoption(parser):
"""Add configuration options for pytest_pgsql."""
parser.addoption(
'--pg-extensions', action='store', default='',
help="A comma-separated list of PostgreSQL extensions to install at "
"the beginning of the session for use by all tests. Example: "
"--pg-extensions=uuid-ossp,pg_tgrm,pgcrypto")
parser.addoption(
'--pg-work-mem', type=int, default=32,
help='Set the value of the `work_mem` setting, in megabytes. '
'`pytest_pgsql` defaults to 32. Adjusting this up or down can '
'help performance; see the Postgres documentation for more details.')
parser.addoption(
'--pg-conf-opt', action='append',
help='Add a key=value line that will be appended to postgresql.conf')
@pytest.fixture(scope='session')
def database_uri(request):
"""A fixture giving the connection URI of the session-wide test database."""
# Note: due to the nature of the variable configs, the command line options
# must be tested manually.
work_mem = request.config.getoption('--pg-work-mem')
if work_mem < 0: # pragma: no cover
pytest.exit('ERROR: --pg-work-mem value must be >= 0. Got: %d' % work_mem)
return
elif work_mem == 0: # pragma: no cover
# Disable memory tweak and use the server default.
work_mem_setting = ''
else:
# User wants to change the working memory setting.
work_mem_setting = '-c work_mem=%dMB ' % work_mem
conf_opts = request.config.getoption('--pg-conf-opt')
if conf_opts:
conf_opts_string = ' -c ' + ' -c '.join(conf_opts)
else:
conf_opts_string = ''
# pylint: disable=bad-continuation,deprecated-method
with testing.postgresql.Postgresql(
postgres_args='-c TimeZone=UTC '
'-c fsync=off '
'-c synchronous_commit=off '
'-c full_page_writes=off '
+ work_mem_setting +
'-c checkpoint_timeout=30min '
'-c bgwriter_delay=10000ms'
+ conf_opts_string) as pgdb:
yield pgdb.url()
#: A SQLAlchemy engine shared by the transacted and non-transacted database fixtures.
#:
#: .. seealso:: `pytest_pgsql.ext.create_engine_fixture`
# pylint: disable=invalid-name
pg_engine = ext.create_engine_fixture('pg_engine', scope='session')
# pylint: enable=invalid-name
@pytest.fixture(scope='session')
def database_snapshot(pg_engine):
"""Create one database snapshot for the session.
The database will be restored to this state after each test.
.. note ::
This is an implementation detail and should not be used directly except
by derived fixtures.
"""
return database.create_database_snapshot(pg_engine)
# pylint: disable=invalid-name
#: Create a test database instance and cleans up after each test finishes.
#:
#: You should prefer the `transacted_postgresql_db` fixture unless your test
#: cannot be run in a single transaction. The `transacted_postgresql_db` fixture
#: leads to faster tests since it doesn't tear down the entire database between
#: each test.
postgresql_db = \
database.PostgreSQLTestDB.create_fixture('postgresql_db')
#: Create a test database instance that rolls back the current transaction after
#: each test finishes, verifying its integrity before returning.
#:
#: Read the warning in the main documentation page before using this fixture.
transacted_postgresql_db = \
database.TransactedPostgreSQLTestDB.create_fixture('transacted_postgresql_db')
# pylint: enable=invalid-name
| en | 0.777745 | This forms the core of the pytest plugin. Add configuration options for pytest_pgsql. A fixture giving the connection URI of the session-wide test database. # Note: due to the nature of the variable configs, the command line options # must be tested manually. # pragma: no cover # pragma: no cover # Disable memory tweak and use the server default. # User wants to change the working memory setting. # pylint: disable=bad-continuation,deprecated-method #: A SQLAlchemy engine shared by the transacted and non-transacted database fixtures. #: #: .. seealso:: `pytest_pgsql.ext.create_engine_fixture` # pylint: disable=invalid-name # pylint: enable=invalid-name Create one database snapshot for the session. The database will be restored to this state after each test. .. note :: This is an implementation detail and should not be used directly except by derived fixtures. # pylint: disable=invalid-name #: Create a test database instance and cleans up after each test finishes. #: #: You should prefer the `transacted_postgresql_db` fixture unless your test #: cannot be run in a single transaction. The `transacted_postgresql_db` fixture #: leads to faster tests since it doesn't tear down the entire database between #: each test. #: Create a test database instance that rolls back the current transaction after #: each test finishes, verifying its integrity before returning. #: #: Read the warning in the main documentation page before using this fixture. # pylint: enable=invalid-name | 2.270895 | 2 |
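Once the plugin above is installed its fixtures are collected automatically, so a test can request them by name. A minimal sketch using the session-scoped database_uri fixture directly (the postgresql_db and transacted_postgresql_db fixtures are the intended entry points for most tests); the table-free query keeps the example self-contained:

import sqlalchemy as sqla

def test_database_uri_is_usable(database_uri):
    engine = sqla.create_engine(database_uri)
    with engine.connect() as conn:
        assert conn.execute(sqla.text('SELECT 1')).scalar() == 1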
power_data_to_sat_passes/date_utils.py | abrahamneben/orbcomm_beam_mapping | 1 | 8368 | # written by abraham on aug 24
def dyear2date(dyear):
year = int(dyear)
month_lengths = [31,28,31,30,31,30,31,31,30,31,30,31]
days_before_months = [0,31,59,90,120,151,181,212,243,273,304,334]
days_into_year_f = (dyear-year)*365
days_into_year_i = int(days_into_year_f)
for i in range(12):
        if days_before_months[i] <= days_into_year_f < (days_before_months[i]+month_lengths[i]):  # <= so exact month boundaries still set month
month = i+1
break
date = days_into_year_i - days_before_months[month-1]
hours_f = (days_into_year_f-days_into_year_i)*24
hours_i = int(hours_f)
minutes_f = (hours_f-hours_i)*60
minutes_i = int(minutes_f)
seconds_i = int((minutes_f-minutes_i)*60)
return "%02d/%02d/%d %02d:%02d:%02d" % (month,date,year,hours_i,minutes_i,seconds_i)
| # written by abraham on aug 24
def dyear2date(dyear):
year = int(dyear)
month_lengths = [31,28,31,30,31,30,31,31,30,31,30,31]
days_before_months = [0,31,59,90,120,151,181,212,243,273,304,334]
days_into_year_f = (dyear-year)*365
days_into_year_i = int(days_into_year_f)
for i in range(12):
        if days_before_months[i] <= days_into_year_f < (days_before_months[i]+month_lengths[i]):  # <= so exact month boundaries still set month
month = i+1
break
date = days_into_year_i - days_before_months[month-1]
hours_f = (days_into_year_f-days_into_year_i)*24
hours_i = int(hours_f)
minutes_f = (hours_f-hours_i)*60
minutes_i = int(minutes_f)
seconds_i = int((minutes_f-minutes_i)*60)
return "%02d/%02d/%d %02d:%02d:%02d" % (month,date,year,hours_i,minutes_i,seconds_i)
| en | 0.985122 | # written by abraham on aug 24 | 3.633322 | 4 |
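dyear2date above maps the fractional part of the year onto a fixed 365-day calendar (leap years are ignored) and builds the day-of-month by subtracting offsets, so the day field is effectively an offset that can print as 00. For comparison, a standard-library sketch of the same conversion; it is not a drop-in replacement, and it also ignores leap years unless 366 is passed:

import datetime

def dyear2date_dt(dyear, days_in_year=365):
    year = int(dyear)
    elapsed = datetime.timedelta(days=(dyear - year) * days_in_year)
    moment = datetime.datetime(year, 1, 1) + elapsed
    return moment.strftime('%m/%d/%Y %H:%M:%S')

print(dyear2date_dt(2014.5))  # -> 07/02/2014 12:00:00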
app/base/count_lines.py | sourcery-ai-bot/personal-expenses-accounting | 5 | 8369 | import glob
from os import walk
exclude_folders = [
'node_modules',
'ios',
'android',
'__pycache__'
]
exclude_files = [
'json',
'txt',
'traineddata',
'lstmf',
'yml',
    'md',
'log',
'env',
'gitignore',
'dockerignore'
]
# get all files in directory
dirr = '/home/viktor/Documents/personal-expenses-accounting/app/services/web_service/'
folders = glob.glob(dirr + '/**/', recursive=True)
# only app related directories
directories = []
for folder in folders:
current_folder = folder.split('/')[-2]
if current_folder not in exclude_folders:
files = glob.glob(folder + '*')
print(files)
directories.append(folder)
# num_lines = sum(1 for line in open('myfile.txt'))
| import glob
from os import walk
exclude_folders = [
'node_modules',
'ios',
'android',
'__pycache__'
]
exclude_files = [
'json',
'txt',
'traineddata',
'lstmf',
'yml',
    'md',
'log',
'env',
'gitignore',
'dockerignore'
]
# get all files in directory
dirr = '/home/viktor/Documents/personal-expenses-accounting/app/services/web_service/'
folders = glob.glob(dirr + '/**/', recursive=True)
# only app related directories
directories = []
for folder in folders:
current_folder = folder.split('/')[-2]
if current_folder not in exclude_folders:
files = glob.glob(folder + '*')
print(files)
directories.append(folder)
# num_lines = sum(1 for line in open('myfile.txt'))
| en | 0.809791 | # get all files in directory # only app related directories # num_lines = sum(1 for line in open('myfile.txt')) | 3.018135 | 3 |
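The script above stops after printing the per-folder file lists: exclude_files is never applied and the final comment only hints at the line counting. A minimal sketch of that remaining step, reusing the same exclude lists (the function name is new here):

import glob
import os

def count_lines(root, exclude_folders, exclude_files):
    total = 0
    for path in glob.glob(os.path.join(root, '**', '*'), recursive=True):
        if not os.path.isfile(path):
            continue
        if set(path.split(os.sep)) & set(exclude_folders):
            continue  # skip anything inside an excluded folder
        if os.path.splitext(path)[1].lstrip('.') in exclude_files:
            continue  # skip excluded extensions
        try:
            with open(path, errors='ignore') as handle:
                total += sum(1 for _ in handle)
        except OSError:
            pass
    return total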
data/contacts.py | rgurevych/python_for_testers | 0 | 8370 |
from models.contact import Contact
testdata = [Contact(first_name="Firstname", last_name="Lastname", mobile_phone="+12345678",
work_phone="12345", home_phone="67890", fax="55443322", email_1="<EMAIL>",
email_2="<EMAIL>", email_3="<EMAIL>",
address="Street, 15 \n 12345 New-York")]
|
from models.contact import Contact
testdata = [Contact(first_name="Firstname", last_name="Lastname", mobile_phone="+12345678",
work_phone="12345", home_phone="67890", fax="55443322", email_1="<EMAIL>",
email_2="<EMAIL>", email_3="<EMAIL>",
address="Street, 15 \n 12345 New-York")]
| none | 1 | 2.021627 | 2 |
|
charmhelpers/contrib/charmsupport/nrpe.py | nobuto-m/charm-helpers | 0 | 8371 |
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compatibility with the nrpe-external-master charm"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# <NAME> <<EMAIL>>
import subprocess
import pwd
import grp
import os
import glob
import shutil
import re
import shlex
import yaml
from charmhelpers.core.hookenv import (
config,
hook_name,
local_unit,
log,
relation_get,
relation_ids,
relation_set,
relations_of_type,
)
from charmhelpers.core.host import service
from charmhelpers.core import host
# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
#
# 1. Update metadata.yaml
#
# provides:
# (...)
# nrpe-external-master:
# interface: nrpe-external-master
# scope: container
#
# and/or
#
# provides:
# (...)
# local-monitors:
# interface: local-monitors
# scope: container
#
# 2. Add the following to config.yaml
#
# nagios_context:
# default: "juju"
# type: string
# description: |
# Used by the nrpe subordinate charms.
# A string that will be prepended to instance name to set the host name
# in nagios. So for instance the hostname would be something like:
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
# nagios_servicegroups:
# default: ""
# type: string
# description: |
# A comma-separated list of nagios servicegroups.
# If left empty, the nagios_context will be used as the servicegroup
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
# 4. Update your hooks.py with something like this:
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE()
# nrpe_compat.add_check(
# shortname = "myservice",
# description = "Check MyService",
# check_cmd = "check_http -w 2 -c 10 http://localhost"
# )
# nrpe_compat.add_check(
# "myservice_other",
# "Check for widget failures",
# check_cmd = "/srv/myapp/scripts/widget_check"
# )
# nrpe_compat.write()
#
# def config_changed():
# (...)
# update_nrpe_config()
#
# def nrpe_external_master_relation_changed():
# update_nrpe_config()
#
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 4.a If your charm is a subordinate charm set primary=False
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE(primary=False)
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed
class CheckException(Exception):
pass
class Check(object):
shortname_re = '[A-Za-z0-9-_.@]+$'
service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
use active-service
host_name {nagios_hostname}
service_description {nagios_hostname}[{shortname}] """
"""{description}
check_command check_nrpe!{command}
servicegroups {nagios_servicegroup}
}}
""")
def __init__(self, shortname, description, check_cmd):
super(Check, self).__init__()
# XXX: could be better to calculate this from the service name
if not re.match(self.shortname_re, shortname):
raise CheckException("shortname must match {}".format(
Check.shortname_re))
self.shortname = shortname
self.command = "check_{}".format(shortname)
# Note: a set of invalid characters is defined by the
# Nagios server config
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
self.description = description
self.check_cmd = self._locate_cmd(check_cmd)
def _get_check_filename(self):
return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
def _get_service_filename(self, hostname):
return os.path.join(NRPE.nagios_exportdir,
'service__{}_{}.cfg'.format(hostname, self.command))
def _locate_cmd(self, check_cmd):
search_path = (
'/usr/lib/nagios/plugins',
'/usr/local/lib/nagios/plugins',
)
parts = shlex.split(check_cmd)
for path in search_path:
if os.path.exists(os.path.join(path, parts[0])):
command = os.path.join(path, parts[0])
if len(parts) > 1:
command += " " + " ".join(parts[1:])
return command
log('Check command not found: {}'.format(parts[0]))
return ''
def _remove_service_files(self):
if not os.path.exists(NRPE.nagios_exportdir):
return
for f in os.listdir(NRPE.nagios_exportdir):
if f.endswith('_{}.cfg'.format(self.command)):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
def remove(self, hostname):
nrpe_check_file = self._get_check_filename()
if os.path.exists(nrpe_check_file):
os.remove(nrpe_check_file)
self._remove_service_files()
def write(self, nagios_context, hostname, nagios_servicegroups):
nrpe_check_file = self._get_check_filename()
with open(nrpe_check_file, 'w') as nrpe_check_config:
nrpe_check_config.write("# check {}\n".format(self.shortname))
if nagios_servicegroups:
nrpe_check_config.write(
"# The following header was added automatically by juju\n")
nrpe_check_config.write(
"# Modifying it will affect nagios monitoring and alerting\n")
nrpe_check_config.write(
"# servicegroups: {}\n".format(nagios_servicegroups))
nrpe_check_config.write("command[{}]={}\n".format(
self.command, self.check_cmd))
if not os.path.exists(NRPE.nagios_exportdir):
log('Not writing service config as {} is not accessible'.format(
NRPE.nagios_exportdir))
else:
self.write_service_config(nagios_context, hostname,
nagios_servicegroups)
def write_service_config(self, nagios_context, hostname,
nagios_servicegroups):
self._remove_service_files()
templ_vars = {
'nagios_hostname': hostname,
'nagios_servicegroup': nagios_servicegroups,
'description': self.description,
'shortname': self.shortname,
'command': self.command,
}
nrpe_service_text = Check.service_template.format(**templ_vars)
nrpe_service_file = self._get_service_filename(hostname)
with open(nrpe_service_file, 'w') as nrpe_service_config:
nrpe_service_config.write(str(nrpe_service_text))
def run(self):
subprocess.call(self.check_cmd)
class NRPE(object):
nagios_logdir = '/var/log/nagios'
nagios_exportdir = '/var/lib/nagios/export'
nrpe_confdir = '/etc/nagios/nrpe.d'
homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
def __init__(self, hostname=None, primary=True):
super(NRPE, self).__init__()
self.config = config()
self.primary = primary
self.nagios_context = self.config['nagios_context']
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
self.nagios_servicegroups = self.config['nagios_servicegroups']
else:
self.nagios_servicegroups = self.nagios_context
self.unit_name = local_unit().replace('/', '-')
if hostname:
self.hostname = hostname
else:
nagios_hostname = get_nagios_hostname()
if nagios_hostname:
self.hostname = nagios_hostname
else:
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
# Iff in an nrpe-external-master relation hook, set primary status
relation = relation_ids('nrpe-external-master')
if relation:
log("Setting charm primary status {}".format(primary))
for rid in relation:
relation_set(relation_id=rid, relation_settings={'primary': self.primary})
self.remove_check_queue = set()
def add_check(self, *args, **kwargs):
shortname = None
if kwargs.get('shortname') is None:
if len(args) > 0:
shortname = args[0]
else:
shortname = kwargs['shortname']
self.checks.append(Check(*args, **kwargs))
try:
self.remove_check_queue.remove(shortname)
except KeyError:
pass
def remove_check(self, *args, **kwargs):
if kwargs.get('shortname') is None:
raise ValueError('shortname of check must be specified')
# Use sensible defaults if they're not specified - these are not
# actually used during removal, but they're required for constructing
# the Check object; check_disk is chosen because it's part of the
# nagios-plugins-basic package.
if kwargs.get('check_cmd') is None:
kwargs['check_cmd'] = 'check_disk'
if kwargs.get('description') is None:
kwargs['description'] = ''
check = Check(*args, **kwargs)
check.remove(self.hostname)
self.remove_check_queue.add(kwargs['shortname'])
def write(self):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
if not os.path.exists(NRPE.nagios_logdir):
os.mkdir(NRPE.nagios_logdir)
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
nrpecheck.write(self.nagios_context, self.hostname,
self.nagios_servicegroups)
nrpe_monitors[nrpecheck.shortname] = {
"command": nrpecheck.command,
}
# update-status hooks are configured to firing every 5 minutes by
# default. When nagios-nrpe-server is restarted, the nagios server
# reports checks failing causing unnecessary alerts. Let's not restart
# on update-status hooks.
if not hook_name() == 'update-status':
service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
for rid in monitor_ids:
reldata = relation_get(unit=local_unit(), rid=rid)
if 'monitors' in reldata:
# update the existing set of monitors with the new data
old_monitors = yaml.safe_load(reldata['monitors'])
old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
# remove keys that are in the remove_check_queue
old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
if k not in self.remove_check_queue}
# update/add nrpe_monitors
old_nrpe_monitors.update(nrpe_monitors)
old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
# write back to the relation
relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
else:
# write a brand new set of monitors, as no existing ones.
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
self.remove_check_queue.clear()
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_host_context
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_host_context' in rel:
return rel['nagios_host_context']
def get_nagios_hostname(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_hostname
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_hostname' in rel:
return rel['nagios_hostname']
def get_nagios_unit_name(relation_name='nrpe-external-master'):
"""
Return the nagios unit name prepended with host_context if needed
:param str relation_name: Name of relation nrpe sub joined to
"""
host_context = get_nagios_hostcontext(relation_name)
if host_context:
unit = "%s:%s" % (host_context, local_unit())
else:
unit = local_unit()
return unit
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param list services: List of services to check
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            continue
upstart_init = '/etc/init/%s.conf' % svc
sysv_init = '/etc/init.d/%s' % svc
if host.init_is_systemd():
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_systemd.py %s' % svc
)
elif os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)
elif os.path.exists(sysv_init):
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
croncmd = (
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
'-e -s /etc/init.d/%s status' % svc
)
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
f = open(cronpath, 'w')
f.write(cron_file)
f.close()
nrpe.add_check(
shortname=svc,
description='service check {%s}' % unit_name,
check_cmd='check_status_file.py -f %s' % checkpath,
)
# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
# (LP: #1670223).
if immediate_check and os.path.isdir(nrpe.homedir):
f = open(checkpath, 'w')
subprocess.call(
croncmd.split(),
stdout=f,
stderr=subprocess.STDOUT
)
f.close()
os.chmod(checkpath, 0o644)
def copy_nrpe_checks(nrpe_files_dir=None):
"""
Copy the nrpe checks into place
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
if nrpe_files_dir is None:
# determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
for segment in ['.', 'hooks']:
nrpe_files_dir = os.path.abspath(os.path.join(
os.getenv('CHARM_DIR'),
segment,
'charmhelpers',
'contrib',
'openstack',
'files'))
if os.path.isdir(nrpe_files_dir):
break
else:
raise RuntimeError("Couldn't find charmhelpers directory")
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
if os.path.isfile(fname):
shutil.copy2(fname,
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
def add_haproxy_checks(nrpe, unit_name):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param str unit_name: Unit name to use in check description
"""
nrpe.add_check(
shortname='haproxy_servers',
description='Check HAProxy {%s}' % unit_name,
check_cmd='check_haproxy.sh')
nrpe.add_check(
shortname='haproxy_queue',
description='Check HAProxy queue depth {%s}' % unit_name,
check_cmd='check_haproxy_queue_depth.sh')
|
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compatibility with the nrpe-external-master charm"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# <NAME> <<EMAIL>>
import subprocess
import pwd
import grp
import os
import glob
import shutil
import re
import shlex
import yaml
from charmhelpers.core.hookenv import (
config,
hook_name,
local_unit,
log,
relation_get,
relation_ids,
relation_set,
relations_of_type,
)
from charmhelpers.core.host import service
from charmhelpers.core import host
# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
#
# 1. Update metadata.yaml
#
# provides:
# (...)
# nrpe-external-master:
# interface: nrpe-external-master
# scope: container
#
# and/or
#
# provides:
# (...)
# local-monitors:
# interface: local-monitors
# scope: container
#
# 2. Add the following to config.yaml
#
# nagios_context:
# default: "juju"
# type: string
# description: |
# Used by the nrpe subordinate charms.
# A string that will be prepended to instance name to set the host name
# in nagios. So for instance the hostname would be something like:
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
# nagios_servicegroups:
# default: ""
# type: string
# description: |
# A comma-separated list of nagios servicegroups.
# If left empty, the nagios_context will be used as the servicegroup
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
# 4. Update your hooks.py with something like this:
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE()
# nrpe_compat.add_check(
# shortname = "myservice",
# description = "Check MyService",
# check_cmd = "check_http -w 2 -c 10 http://localhost"
# )
# nrpe_compat.add_check(
# "myservice_other",
# "Check for widget failures",
# check_cmd = "/srv/myapp/scripts/widget_check"
# )
# nrpe_compat.write()
#
# def config_changed():
# (...)
# update_nrpe_config()
#
# def nrpe_external_master_relation_changed():
# update_nrpe_config()
#
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 4.a If your charm is a subordinate charm set primary=False
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE(primary=False)
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed
class CheckException(Exception):
pass
class Check(object):
shortname_re = '[A-Za-z0-9-_.@]+$'
service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
use active-service
host_name {nagios_hostname}
service_description {nagios_hostname}[{shortname}] """
"""{description}
check_command check_nrpe!{command}
servicegroups {nagios_servicegroup}
}}
""")
def __init__(self, shortname, description, check_cmd):
super(Check, self).__init__()
# XXX: could be better to calculate this from the service name
if not re.match(self.shortname_re, shortname):
raise CheckException("shortname must match {}".format(
Check.shortname_re))
self.shortname = shortname
self.command = "check_{}".format(shortname)
# Note: a set of invalid characters is defined by the
# Nagios server config
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
self.description = description
self.check_cmd = self._locate_cmd(check_cmd)
def _get_check_filename(self):
return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
def _get_service_filename(self, hostname):
return os.path.join(NRPE.nagios_exportdir,
'service__{}_{}.cfg'.format(hostname, self.command))
def _locate_cmd(self, check_cmd):
search_path = (
'/usr/lib/nagios/plugins',
'/usr/local/lib/nagios/plugins',
)
parts = shlex.split(check_cmd)
for path in search_path:
if os.path.exists(os.path.join(path, parts[0])):
command = os.path.join(path, parts[0])
if len(parts) > 1:
command += " " + " ".join(parts[1:])
return command
log('Check command not found: {}'.format(parts[0]))
return ''
def _remove_service_files(self):
if not os.path.exists(NRPE.nagios_exportdir):
return
for f in os.listdir(NRPE.nagios_exportdir):
if f.endswith('_{}.cfg'.format(self.command)):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
def remove(self, hostname):
nrpe_check_file = self._get_check_filename()
if os.path.exists(nrpe_check_file):
os.remove(nrpe_check_file)
self._remove_service_files()
def write(self, nagios_context, hostname, nagios_servicegroups):
nrpe_check_file = self._get_check_filename()
with open(nrpe_check_file, 'w') as nrpe_check_config:
nrpe_check_config.write("# check {}\n".format(self.shortname))
if nagios_servicegroups:
nrpe_check_config.write(
"# The following header was added automatically by juju\n")
nrpe_check_config.write(
"# Modifying it will affect nagios monitoring and alerting\n")
nrpe_check_config.write(
"# servicegroups: {}\n".format(nagios_servicegroups))
nrpe_check_config.write("command[{}]={}\n".format(
self.command, self.check_cmd))
if not os.path.exists(NRPE.nagios_exportdir):
log('Not writing service config as {} is not accessible'.format(
NRPE.nagios_exportdir))
else:
self.write_service_config(nagios_context, hostname,
nagios_servicegroups)
def write_service_config(self, nagios_context, hostname,
nagios_servicegroups):
self._remove_service_files()
templ_vars = {
'nagios_hostname': hostname,
'nagios_servicegroup': nagios_servicegroups,
'description': self.description,
'shortname': self.shortname,
'command': self.command,
}
nrpe_service_text = Check.service_template.format(**templ_vars)
nrpe_service_file = self._get_service_filename(hostname)
with open(nrpe_service_file, 'w') as nrpe_service_config:
nrpe_service_config.write(str(nrpe_service_text))
def run(self):
subprocess.call(self.check_cmd)
class NRPE(object):
nagios_logdir = '/var/log/nagios'
nagios_exportdir = '/var/lib/nagios/export'
nrpe_confdir = '/etc/nagios/nrpe.d'
homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
def __init__(self, hostname=None, primary=True):
super(NRPE, self).__init__()
self.config = config()
self.primary = primary
self.nagios_context = self.config['nagios_context']
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
self.nagios_servicegroups = self.config['nagios_servicegroups']
else:
self.nagios_servicegroups = self.nagios_context
self.unit_name = local_unit().replace('/', '-')
if hostname:
self.hostname = hostname
else:
nagios_hostname = get_nagios_hostname()
if nagios_hostname:
self.hostname = nagios_hostname
else:
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
# Iff in an nrpe-external-master relation hook, set primary status
relation = relation_ids('nrpe-external-master')
if relation:
log("Setting charm primary status {}".format(primary))
for rid in relation:
relation_set(relation_id=rid, relation_settings={'primary': self.primary})
self.remove_check_queue = set()
def add_check(self, *args, **kwargs):
shortname = None
if kwargs.get('shortname') is None:
if len(args) > 0:
shortname = args[0]
else:
shortname = kwargs['shortname']
self.checks.append(Check(*args, **kwargs))
try:
self.remove_check_queue.remove(shortname)
except KeyError:
pass
def remove_check(self, *args, **kwargs):
if kwargs.get('shortname') is None:
raise ValueError('shortname of check must be specified')
# Use sensible defaults if they're not specified - these are not
# actually used during removal, but they're required for constructing
# the Check object; check_disk is chosen because it's part of the
# nagios-plugins-basic package.
if kwargs.get('check_cmd') is None:
kwargs['check_cmd'] = 'check_disk'
if kwargs.get('description') is None:
kwargs['description'] = ''
check = Check(*args, **kwargs)
check.remove(self.hostname)
self.remove_check_queue.add(kwargs['shortname'])
def write(self):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
if not os.path.exists(NRPE.nagios_logdir):
os.mkdir(NRPE.nagios_logdir)
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
nrpecheck.write(self.nagios_context, self.hostname,
self.nagios_servicegroups)
nrpe_monitors[nrpecheck.shortname] = {
"command": nrpecheck.command,
}
# update-status hooks are configured to firing every 5 minutes by
# default. When nagios-nrpe-server is restarted, the nagios server
# reports checks failing causing unnecessary alerts. Let's not restart
# on update-status hooks.
if not hook_name() == 'update-status':
service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
for rid in monitor_ids:
reldata = relation_get(unit=local_unit(), rid=rid)
if 'monitors' in reldata:
# update the existing set of monitors with the new data
old_monitors = yaml.safe_load(reldata['monitors'])
old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
# remove keys that are in the remove_check_queue
old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
if k not in self.remove_check_queue}
# update/add nrpe_monitors
old_nrpe_monitors.update(nrpe_monitors)
old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
# write back to the relation
relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
else:
# write a brand new set of monitors, as no existing ones.
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
self.remove_check_queue.clear()
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_host_context
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_host_context' in rel:
return rel['nagios_host_context']
def get_nagios_hostname(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_hostname
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_hostname' in rel:
return rel['nagios_hostname']
def get_nagios_unit_name(relation_name='nrpe-external-master'):
"""
Return the nagios unit name prepended with host_context if needed
:param str relation_name: Name of relation nrpe sub joined to
"""
host_context = get_nagios_hostcontext(relation_name)
if host_context:
unit = "%s:%s" % (host_context, local_unit())
else:
unit = local_unit()
return unit
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param list services: List of services to check
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            continue
upstart_init = '/etc/init/%s.conf' % svc
sysv_init = '/etc/init.d/%s' % svc
if host.init_is_systemd():
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_systemd.py %s' % svc
)
elif os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)
elif os.path.exists(sysv_init):
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
croncmd = (
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
'-e -s /etc/init.d/%s status' % svc
)
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
f = open(cronpath, 'w')
f.write(cron_file)
f.close()
nrpe.add_check(
shortname=svc,
description='service check {%s}' % unit_name,
check_cmd='check_status_file.py -f %s' % checkpath,
)
# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
# (LP: #1670223).
if immediate_check and os.path.isdir(nrpe.homedir):
f = open(checkpath, 'w')
subprocess.call(
croncmd.split(),
stdout=f,
stderr=subprocess.STDOUT
)
f.close()
os.chmod(checkpath, 0o644)
def copy_nrpe_checks(nrpe_files_dir=None):
"""
Copy the nrpe checks into place
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
if nrpe_files_dir is None:
# determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
for segment in ['.', 'hooks']:
nrpe_files_dir = os.path.abspath(os.path.join(
os.getenv('CHARM_DIR'),
segment,
'charmhelpers',
'contrib',
'openstack',
'files'))
if os.path.isdir(nrpe_files_dir):
break
else:
raise RuntimeError("Couldn't find charmhelpers directory")
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
if os.path.isfile(fname):
shutil.copy2(fname,
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
def add_haproxy_checks(nrpe, unit_name):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param str unit_name: Unit name to use in check description
"""
nrpe.add_check(
shortname='haproxy_servers',
description='Check HAProxy {%s}' % unit_name,
check_cmd='check_haproxy.sh')
nrpe.add_check(
shortname='haproxy_queue',
description='Check HAProxy queue depth {%s}' % unit_name,
check_cmd='check_haproxy_queue_depth.sh')
| en | 0.69985 | # Copyright 2014-2015 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Compatibility with the nrpe-external-master charm # Copyright 2012 Canonical Ltd. # # Authors: # <NAME> <<EMAIL>> # This module adds compatibility with the nrpe-external-master and plain nrpe # subordinate charms. To use it in your charm: # # 1. Update metadata.yaml # # provides: # (...) # nrpe-external-master: # interface: nrpe-external-master # scope: container # # and/or # # provides: # (...) # local-monitors: # interface: local-monitors # scope: container # # 2. Add the following to config.yaml # # nagios_context: # default: "juju" # type: string # description: | # Used by the nrpe subordinate charms. # A string that will be prepended to instance name to set the host name # in nagios. So for instance the hostname would be something like: # juju-myservice-0 # If you're running multiple environments with the same services in them # this allows you to differentiate between them. # nagios_servicegroups: # default: "" # type: string # description: | # A comma-separated list of nagios servicegroups. # If left empty, the nagios_context will be used as the servicegroup # # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master # # 4. Update your hooks.py with something like this: # # from charmsupport.nrpe import NRPE # (...) # def update_nrpe_config(): # nrpe_compat = NRPE() # nrpe_compat.add_check( # shortname = "myservice", # description = "Check MyService", # check_cmd = "check_http -w 2 -c 10 http://localhost" # ) # nrpe_compat.add_check( # "myservice_other", # "Check for widget failures", # check_cmd = "/srv/myapp/scripts/widget_check" # ) # nrpe_compat.write() # # def config_changed(): # (...) # update_nrpe_config() # # def nrpe_external_master_relation_changed(): # update_nrpe_config() # # def local_monitors_relation_changed(): # update_nrpe_config() # # 4.a If your charm is a subordinate charm set primary=False # # from charmsupport.nrpe import NRPE # (...) # def update_nrpe_config(): # nrpe_compat = NRPE(primary=False) # # 5. 
ln -s hooks.py nrpe-external-master-relation-changed # ln -s hooks.py local-monitors-relation-changed #--------------------------------------------------- # This file is Juju managed #--------------------------------------------------- define service {{ use active-service host_name {nagios_hostname} service_description {nagios_hostname}[{shortname}] {description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} }} # XXX: could be better to calculate this from the service name # Note: a set of invalid characters is defined by the # Nagios server config # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= # home dir provided by nagios-nrpe-server # Iff in an nrpe-external-master relation hook, set primary status # Use sensible defaults if they're not specified - these are not # actually used during removal, but they're required for constructing # the Check object; check_disk is chosen because it's part of the # nagios-plugins-basic package. # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server # reports checks failing causing unnecessary alerts. Let's not restart # on update-status hooks. # update the existing set of monitors with the new data # remove keys that are in the remove_check_queue # update/add nrpe_monitors # write back to the relation # write a brand new set of monitors, as no existing ones. Query relation with nrpe subordinate, return the nagios_host_context :param str relation_name: Name of relation nrpe sub joined to Query relation with nrpe subordinate, return the nagios_hostname :param str relation_name: Name of relation nrpe sub joined to Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to Add checks for each service in list :param NRPE nrpe: NRPE object to add check to :param list services: List of services to check :param str unit_name: Unit name to use in check description :param bool immediate_check: For sysv init, run the service check immediately # Don't add a check for these services from neutron-gateway # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail # (LP: #1670223). Copy the nrpe checks into place # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks Add checks for each service in list :param NRPE nrpe: NRPE object to add check to :param str unit_name: Unit name to use in check description | 1.68381 | 2 |
venv/Lib/site-packages/proglog/proglog.py | mintzer/pupillometry-rf-back | 83 | 8372 | """Implements the generic progress logger class, and the ProgressBar class.
"""
from tqdm import tqdm, tqdm_notebook
from collections import OrderedDict
import time
SETTINGS = {
'notebook': False
}
def notebook(turn='on'):
SETTINGS['notebook'] = True if (turn == 'on') else False
def troncate_string(s, max_length=25):
return s if (len(s) < max_length) else (s[:max_length] + "...")
class ProgressLogger:
"""Generic class for progress loggers.
    A progress logger contains a "state" dictionary.
Parameters
----------
init_state
      Dictionary representing the initial state.
"""
def __init__(self, init_state=None):
self.state = {}
self.stored = {}
self.logs = []
self.log_indent = 0
if init_state is not None:
self.state.update(init_state)
def log(self, message):
self.logs.append((' ' * self.log_indent) + message)
def dump_logs(self, filepath=None):
if filepath is not None:
with open(filepath, 'a') as f:
f.write("\n".join(self.logs))
else:
return "\n".join(self.logs)
def callback(self, **kw):
"""Execute something after the state has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def store(self, **kw):
"""Store objects in the logger and trigger ``self.store_callback``.
        This works exactly like ``logger()``, but the latter is meant for simple
data objects (text, numbers) that will be sent over the network or
written to a file. The ``store`` method expects rather large objects
which are not necessarily serializable, and will be used eg to draw
plots on the fly.
"""
self.stored.update(kw)
self.store_callback(**kw)
def store_callback(self, **kw):
"""Execute something after the store has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def iter(self, **kw):
"""Iterate through a list while updating the state.
Examples
--------
        >>> for username in logger.iter(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['user'] is updated
>>> print (username)
"""
for field, iterable in kw.items():
for it in iterable:
self(**{field: it})
yield it
def __call__(self, **kw):
self.state.update(kw)
self.callback(**kw)
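# --- Editor's note: minimal usage sketch added for illustration; it relies only on the
# ProgressLogger API defined above and is not part of the original proglog source.
#
#     logger = ProgressLogger(init_state={'task': 'demo'})
#     for user in logger.iter(user=['tom', 'tim', 'lea']):
#         logger(message='processing ' + user)   # __call__ updates logger.state
#     print(logger.state['user'])                # -> 'lea'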
class ProgressBarLogger(ProgressLogger):
"""Generic class for progress loggers.
    A progress logger contains a "state" dictionary
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
"""
bar_indent = 2
def __init__(self, init_state=None, bars=None, ignored_bars=None,
logged_bars='all', min_time_interval=0, ignore_bars_under=0):
ProgressLogger.__init__(self, init_state)
if bars is None:
bars = OrderedDict()
elif isinstance(bars, (list, tuple)):
bars = OrderedDict([
(b, dict(title=b, index=-1, total=None, message=None,
indent=0))
for b in bars
])
if isinstance(ignored_bars, (list, tuple)):
ignored_bars = set(ignored_bars)
self.ignored_bars = ignored_bars
self.logged_bars = logged_bars
self.state['bars'] = bars
self.min_time_interval = min_time_interval
self.ignore_bars_under = ignore_bars_under
@property
def bars(self):
"""Return ``self.state['bars'].``"""
return self.state['bars']
def bar_is_ignored(self, bar):
if self.ignored_bars is None:
return False
elif self.ignored_bars == 'all_others':
return (bar not in self.bars)
else:
return bar in self.ignored_bars
def bar_is_logged(self, bar):
if (not self.logged_bars):
return False
elif self.logged_bars == 'all':
return True
else:
return bar in self.logged_bars
def iterable_is_too_short(self, iterable):
length = len(iterable) if hasattr(iterable, '__len__') else None
return (length is not None) and (length < self.ignore_bars_under)
def iter_bar(self, bar_prefix='', **kw):
"""Iterate through a list while updating a state bar.
Examples
--------
>>> for username in logger.iter_bar(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['bars']['user'] is updated
>>> # to {index: i, total: 3, title:'user'}
>>> print (username)
"""
if 'bar_message' in kw:
bar_message = kw.pop('bar_message')
else:
bar_message = None
bar, iterable = kw.popitem()
if self.bar_is_ignored(bar) or self.iterable_is_too_short(iterable):
return iterable
bar = bar_prefix + bar
if hasattr(iterable, '__len__'):
self(**{bar + '__total': len(iterable)})
def new_iterable():
last_time = time.time()
i = 0 # necessary in case the iterator is empty
for i, it in enumerate(iterable):
now_time = time.time()
if (i == 0) or (now_time - last_time > self.min_time_interval):
if bar_message is not None:
self(**{bar + '__message': bar_message(it)})
self(**{bar + '__index': i})
last_time = now_time
yield it
if self.bars[bar]['index'] != i:
self(**{bar + '__index': i})
self(**{bar + '__index': i + 1})
return new_iterable()
def bars_callback(self, bar, attr, value, old_value=None):
"""Execute a custom action after the progress bars are updated.
Parameters
----------
bar
Name/ID of the bar to be modified.
attr
Attribute of the bar attribute to be modified
value
New value of the attribute
old_value
Previous value of this bar's attribute.
This default callback does nothing, overwrite it by subclassing.
"""
pass
def __call__(self, **kw):
items = sorted(kw.items(), key=lambda kv: not kv[0].endswith('total'))
for key, value in items:
if '__' in key:
bar, attr = key.split('__')
if self.bar_is_ignored(bar):
continue
kw.pop(key)
if bar not in self.bars:
self.bars[bar] = dict(title=bar, index=-1,
total=None, message=None)
old_value = self.bars[bar][attr]
if self.bar_is_logged(bar):
new_bar = (attr == 'index') and (value < old_value)
if (attr == 'total') or (new_bar):
self.bars[bar]['indent'] = self.log_indent
else:
self.log_indent = self.bars[bar]['indent']
self.log("[%s] %s: %s" % (bar, attr, value))
self.log_indent += self.bar_indent
self.bars[bar][attr] = value
self.bars_callback(bar, attr, value, old_value)
self.state.update(kw)
self.callback(**kw)
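# --- Editor's note: illustrative subclass sketch (not in the original source). A concrete
# backend only needs to override ``bars_callback`` to react to bar updates:
#
#     class PrintBarLogger(ProgressBarLogger):
#         def bars_callback(self, bar, attr, value, old_value=None):
#             print(bar, attr, value)
#
#     logger = PrintBarLogger(bars=['chunk'])
#     for chunk in logger.iter_bar(chunk=range(3)):
#         pass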
class TqdmProgressBarLogger(ProgressBarLogger):
"""Tqdm-powered progress bar for console or Notebooks.
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
leave_bars
notebook
True will make the bars look nice (HTML) in the jupyter notebook. It is
advised to leave to 'default' as the default can be globally set from
inside a notebook with ``import proglog; proglog.notebook_mode()``.
print_messages
If True, every ``logger(message='something')`` will print a message in
the console / notebook
"""
def __init__(self, init_state=None, bars=None, leave_bars=False,
ignored_bars=None, logged_bars='all', notebook='default',
print_messages=True, min_time_interval=0,
ignore_bars_under=0):
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
ignore_bars_under=ignore_bars_under,
min_time_interval=min_time_interval)
self.leave_bars = leave_bars
self.tqdm_bars = OrderedDict([
(bar, None)
for bar in self.bars
])
if notebook == 'default':
notebook = SETTINGS['notebook']
self.notebook = notebook
self.print_messages = print_messages
self.tqdm = (tqdm_notebook if self.notebook else tqdm)
def new_tqdm_bar(self, bar):
"""Create a new tqdm bar, possibly replacing an existing one."""
if (bar in self.tqdm_bars) and (self.tqdm_bars[bar] is not None):
self.close_tqdm_bar(bar)
infos = self.bars[bar]
self.tqdm_bars[bar] = self.tqdm(
total=infos['total'],
desc=infos['title'],
postfix=dict(now=troncate_string(str(infos['message']))),
leave=self.leave_bars
)
def close_tqdm_bar(self, bar):
"""Close and erase the tqdm bar"""
self.tqdm_bars[bar].close()
if not self.notebook:
self.tqdm_bars[bar] = None
def bars_callback(self, bar, attr, value, old_value):
if (bar not in self.tqdm_bars) or (self.tqdm_bars[bar] is None):
self.new_tqdm_bar(bar)
if attr == 'index':
if value >= old_value:
total = self.bars[bar]['total']
if total and (value >= total):
self.close_tqdm_bar(bar)
else:
self.tqdm_bars[bar].update(value - old_value)
else:
self.new_tqdm_bar(bar)
self.tqdm_bars[bar].update(value + 1)
elif attr == 'message':
self.tqdm_bars[bar].set_postfix(now=troncate_string(str(value)))
self.tqdm_bars[bar].update(0)
def callback(self, **kw):
if self.print_messages and ('message' in kw) and kw['message']:
if self.notebook:
print(kw['message'])
else:
self.tqdm.write(kw['message'])
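# --- Editor's note: hypothetical console usage (not in the original source); it only
# assumes tqdm is installed, which the imports at the top of this module already require.
#
#     logger = TqdmProgressBarLogger(print_messages=True)
#     for frame in logger.iter_bar(frame=range(100)):
#         pass  # do the per-frame work here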
class RqWorkerProgressLogger:
def __init__(self, job):
self.job = job
if 'progress_data' not in self.job.meta:
self.job.meta['progress_data'] = {}
self.job.save()
def callback(self, **kw):
self.job.meta['progress_data'] = self.state
self.job.save()
class RqWorkerBarLogger(RqWorkerProgressLogger, ProgressBarLogger):
def __init__(self, job, init_state=None, bars=None, ignored_bars=(),
logged_bars='all', min_time_interval=0):
RqWorkerProgressLogger.__init__(self, job)
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval)
class MuteProgressBarLogger(ProgressBarLogger):
def bar_is_ignored(self, bar):
return True
def default_bar_logger(logger, bars=None, ignored_bars=None, logged_bars='all',
min_time_interval=0, ignore_bars_under=0):
if logger == 'bar':
return TqdmProgressBarLogger(
bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval,
ignore_bars_under=ignore_bars_under
)
elif logger is None:
return MuteProgressBarLogger()
else:
return logger
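# --- Editor's note: small selection sketch added for illustration (not in the original
# source). ``default_bar_logger`` lets library code accept 'bar', None, or a custom logger:
#
#     logger = default_bar_logger('bar')   # tqdm-backed progress bars
#     silent = default_bar_logger(None)    # MuteProgressBarLogger, no output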
| """Implements the generic progress logger class, and the ProgressBar class.
"""
from tqdm import tqdm, tqdm_notebook
from collections import OrderedDict
import time
SETTINGS = {
'notebook': False
}
def notebook(turn='on'):
SETTINGS['notebook'] = True if (turn == 'on') else False
def troncate_string(s, max_length=25):
return s if (len(s) < max_length) else (s[:max_length] + "...")
class ProgressLogger:
"""Generic class for progress loggers.
    A progress logger contains a "state" dictionary.
Parameters
----------
init_state
      Dictionary representing the initial state.
"""
def __init__(self, init_state=None):
self.state = {}
self.stored = {}
self.logs = []
self.log_indent = 0
if init_state is not None:
self.state.update(init_state)
def log(self, message):
self.logs.append((' ' * self.log_indent) + message)
def dump_logs(self, filepath=None):
if filepath is not None:
with open(filepath, 'a') as f:
f.write("\n".join(self.logs))
else:
return "\n".join(self.logs)
def callback(self, **kw):
"""Execute something after the state has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def store(self, **kw):
"""Store objects in the logger and trigger ``self.store_callback``.
        This works exactly like ``logger()``, but the latter is meant for simple
data objects (text, numbers) that will be sent over the network or
written to a file. The ``store`` method expects rather large objects
which are not necessarily serializable, and will be used eg to draw
plots on the fly.
"""
self.stored.update(kw)
self.store_callback(**kw)
def store_callback(self, **kw):
"""Execute something after the store has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def iter(self, **kw):
"""Iterate through a list while updating the state.
Examples
--------
        >>> for username in logger.iter(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['user'] is updated
>>> print (username)
"""
for field, iterable in kw.items():
for it in iterable:
self(**{field: it})
yield it
def __call__(self, **kw):
self.state.update(kw)
self.callback(**kw)
class ProgressBarLogger(ProgressLogger):
"""Generic class for progress loggers.
    A progress logger contains a "state" dictionary
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
"""
bar_indent = 2
def __init__(self, init_state=None, bars=None, ignored_bars=None,
logged_bars='all', min_time_interval=0, ignore_bars_under=0):
ProgressLogger.__init__(self, init_state)
if bars is None:
bars = OrderedDict()
elif isinstance(bars, (list, tuple)):
bars = OrderedDict([
(b, dict(title=b, index=-1, total=None, message=None,
indent=0))
for b in bars
])
if isinstance(ignored_bars, (list, tuple)):
ignored_bars = set(ignored_bars)
self.ignored_bars = ignored_bars
self.logged_bars = logged_bars
self.state['bars'] = bars
self.min_time_interval = min_time_interval
self.ignore_bars_under = ignore_bars_under
@property
def bars(self):
"""Return ``self.state['bars'].``"""
return self.state['bars']
def bar_is_ignored(self, bar):
if self.ignored_bars is None:
return False
elif self.ignored_bars == 'all_others':
return (bar not in self.bars)
else:
return bar in self.ignored_bars
def bar_is_logged(self, bar):
if (not self.logged_bars):
return False
elif self.logged_bars == 'all':
return True
else:
return bar in self.logged_bars
def iterable_is_too_short(self, iterable):
length = len(iterable) if hasattr(iterable, '__len__') else None
return (length is not None) and (length < self.ignore_bars_under)
def iter_bar(self, bar_prefix='', **kw):
"""Iterate through a list while updating a state bar.
Examples
--------
>>> for username in logger.iter_bar(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['bars']['user'] is updated
>>> # to {index: i, total: 3, title:'user'}
>>> print (username)
"""
if 'bar_message' in kw:
bar_message = kw.pop('bar_message')
else:
bar_message = None
bar, iterable = kw.popitem()
if self.bar_is_ignored(bar) or self.iterable_is_too_short(iterable):
return iterable
bar = bar_prefix + bar
if hasattr(iterable, '__len__'):
self(**{bar + '__total': len(iterable)})
def new_iterable():
last_time = time.time()
i = 0 # necessary in case the iterator is empty
for i, it in enumerate(iterable):
now_time = time.time()
if (i == 0) or (now_time - last_time > self.min_time_interval):
if bar_message is not None:
self(**{bar + '__message': bar_message(it)})
self(**{bar + '__index': i})
last_time = now_time
yield it
if self.bars[bar]['index'] != i:
self(**{bar + '__index': i})
self(**{bar + '__index': i + 1})
return new_iterable()
def bars_callback(self, bar, attr, value, old_value=None):
"""Execute a custom action after the progress bars are updated.
Parameters
----------
bar
Name/ID of the bar to be modified.
attr
Attribute of the bar attribute to be modified
value
New value of the attribute
old_value
Previous value of this bar's attribute.
This default callback does nothing, overwrite it by subclassing.
"""
pass
def __call__(self, **kw):
items = sorted(kw.items(), key=lambda kv: not kv[0].endswith('total'))
for key, value in items:
if '__' in key:
bar, attr = key.split('__')
if self.bar_is_ignored(bar):
continue
kw.pop(key)
if bar not in self.bars:
self.bars[bar] = dict(title=bar, index=-1,
total=None, message=None)
old_value = self.bars[bar][attr]
if self.bar_is_logged(bar):
new_bar = (attr == 'index') and (value < old_value)
if (attr == 'total') or (new_bar):
self.bars[bar]['indent'] = self.log_indent
else:
self.log_indent = self.bars[bar]['indent']
self.log("[%s] %s: %s" % (bar, attr, value))
self.log_indent += self.bar_indent
self.bars[bar][attr] = value
self.bars_callback(bar, attr, value, old_value)
self.state.update(kw)
self.callback(**kw)
class TqdmProgressBarLogger(ProgressBarLogger):
"""Tqdm-powered progress bar for console or Notebooks.
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
leave_bars
notebook
True will make the bars look nice (HTML) in the jupyter notebook. It is
advised to leave to 'default' as the default can be globally set from
inside a notebook with ``import proglog; proglog.notebook_mode()``.
print_messages
If True, every ``logger(message='something')`` will print a message in
the console / notebook
"""
def __init__(self, init_state=None, bars=None, leave_bars=False,
ignored_bars=None, logged_bars='all', notebook='default',
print_messages=True, min_time_interval=0,
ignore_bars_under=0):
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
ignore_bars_under=ignore_bars_under,
min_time_interval=min_time_interval)
self.leave_bars = leave_bars
self.tqdm_bars = OrderedDict([
(bar, None)
for bar in self.bars
])
if notebook == 'default':
notebook = SETTINGS['notebook']
self.notebook = notebook
self.print_messages = print_messages
self.tqdm = (tqdm_notebook if self.notebook else tqdm)
def new_tqdm_bar(self, bar):
"""Create a new tqdm bar, possibly replacing an existing one."""
if (bar in self.tqdm_bars) and (self.tqdm_bars[bar] is not None):
self.close_tqdm_bar(bar)
infos = self.bars[bar]
self.tqdm_bars[bar] = self.tqdm(
total=infos['total'],
desc=infos['title'],
postfix=dict(now=troncate_string(str(infos['message']))),
leave=self.leave_bars
)
def close_tqdm_bar(self, bar):
"""Close and erase the tqdm bar"""
self.tqdm_bars[bar].close()
if not self.notebook:
self.tqdm_bars[bar] = None
def bars_callback(self, bar, attr, value, old_value):
if (bar not in self.tqdm_bars) or (self.tqdm_bars[bar] is None):
self.new_tqdm_bar(bar)
if attr == 'index':
if value >= old_value:
total = self.bars[bar]['total']
if total and (value >= total):
self.close_tqdm_bar(bar)
else:
self.tqdm_bars[bar].update(value - old_value)
else:
self.new_tqdm_bar(bar)
self.tqdm_bars[bar].update(value + 1)
elif attr == 'message':
self.tqdm_bars[bar].set_postfix(now=troncate_string(str(value)))
self.tqdm_bars[bar].update(0)
def callback(self, **kw):
if self.print_messages and ('message' in kw) and kw['message']:
if self.notebook:
print(kw['message'])
else:
self.tqdm.write(kw['message'])
class RqWorkerProgressLogger:
def __init__(self, job):
self.job = job
if 'progress_data' not in self.job.meta:
self.job.meta['progress_data'] = {}
self.job.save()
def callback(self, **kw):
self.job.meta['progress_data'] = self.state
self.job.save()
class RqWorkerBarLogger(RqWorkerProgressLogger, ProgressBarLogger):
def __init__(self, job, init_state=None, bars=None, ignored_bars=(),
logged_bars='all', min_time_interval=0):
RqWorkerProgressLogger.__init__(self, job)
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval)
class MuteProgressBarLogger(ProgressBarLogger):
def bar_is_ignored(self, bar):
return True
def default_bar_logger(logger, bars=None, ignored_bars=None, logged_bars='all',
min_time_interval=0, ignore_bars_under=0):
if logger == 'bar':
return TqdmProgressBarLogger(
bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval,
ignore_bars_under=ignore_bars_under
)
elif logger is None:
return MuteProgressBarLogger()
else:
return logger
| en | 0.757778 | Implements the generic progress logger class, and the ProgressBar class. Generic class for progress loggers. A progress logger contains a "state" dictionnary. Parameters ---------- init_state Dictionnary representing the initial state. Execute something after the state has been updated by the given state elements. This default callback does nothing, overwrite it by subclassing Store objects in the logger and trigger ``self.store_callback``. This works exactly like ``logger()``, but the later is meant for simple data objects (text, numbers) that will be sent over the network or written to a file. The ``store`` method expects rather large objects which are not necessarily serializable, and will be used eg to draw plots on the fly. Execute something after the store has been updated by the given state elements. This default callback does nothing, overwrite it by subclassing Iterate through a list while updating the state. Examples -------- >>> for username in logger.iter(user=['tom', 'tim', 'lea']: >>> # At every loop, logger.state['user'] is updated >>> print (username) Generic class for progress loggers. A progress logger contains a "state" dictionnary Parameters ---------- init_state Initial state of the logger bars Either None (will be initialized with no bar) or a list/tuple of bar names (``['main', 'sub']``) which will be initialized with index -1 and no total, or a dictionary (possibly ordered) of bars, of the form ``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}`` ignored_bars Either None (newly met bars will be added) or a list of blacklisted bar names, or ``'all_others'`` to signify that all bar names not already in ``self.bars`` will be ignored. Return ``self.state['bars'].`` Iterate through a list while updating a state bar. Examples -------- >>> for username in logger.iter_bar(user=['tom', 'tim', 'lea']): >>> # At every loop, logger.state['bars']['user'] is updated >>> # to {index: i, total: 3, title:'user'} >>> print (username) # necessary in case the iterator is empty Execute a custom action after the progress bars are updated. Parameters ---------- bar Name/ID of the bar to be modified. attr Attribute of the bar attribute to be modified value New value of the attribute old_value Previous value of this bar's attribute. This default callback does nothing, overwrite it by subclassing. Tqdm-powered progress bar for console or Notebooks. Parameters ---------- init_state Initial state of the logger bars Either None (will be initialized with no bar) or a list/tuple of bar names (``['main', 'sub']``) which will be initialized with index -1 and no total, or a dictionary (possibly ordered) of bars, of the form ``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}`` ignored_bars Either None (newly met bars will be added) or a list of blacklisted bar names, or ``'all_others'`` to signify that all bar names not already in ``self.bars`` will be ignored. leave_bars notebook True will make the bars look nice (HTML) in the jupyter notebook. It is advised to leave to 'default' as the default can be globally set from inside a notebook with ``import proglog; proglog.notebook_mode()``. print_messages If True, every ``logger(message='something')`` will print a message in the console / notebook Create a new tqdm bar, possibly replacing an existing one. Close and erase the tqdm bar | 3.17138 | 3 |
gdsfactory/tests/test_component_from_yaml_bezier.py | jorgepadilla19/gdsfactory | 42 | 8373 | <reponame>jorgepadilla19/gdsfactory
import gdsfactory as gf
from gdsfactory.component import Component
yaml = """
name:
test_component_yaml_without_cell
instances:
mmi:
component: mmi1x2
bend:
component: bend_s
connections:
bend,o1: mmi,o2
"""
def test_component_from_yaml_without_cell() -> Component:
"""bezier does not have cell"""
c = gf.read.from_yaml(yaml)
assert c.name == "test_component_yaml_without_cell", c.name
assert len(c.get_dependencies()) == 2, len(c.get_dependencies())
assert len(c.ports) == 0, len(c.ports)
return c
if __name__ == "__main__":
c = test_component_from_yaml_without_cell()
print(c.name)
c.show()
| import gdsfactory as gf
from gdsfactory.component import Component
yaml = """
name:
test_component_yaml_without_cell
instances:
mmi:
component: mmi1x2
bend:
component: bend_s
connections:
bend,o1: mmi,o2
"""
def test_component_from_yaml_without_cell() -> Component:
"""bezier does not have cell"""
c = gf.read.from_yaml(yaml)
assert c.name == "test_component_yaml_without_cell", c.name
assert len(c.get_dependencies()) == 2, len(c.get_dependencies())
assert len(c.ports) == 0, len(c.ports)
return c
if __name__ == "__main__":
c = test_component_from_yaml_without_cell()
print(c.name)
c.show() | en | 0.640118 | name: test_component_yaml_without_cell instances: mmi: component: mmi1x2 bend: component: bend_s connections: bend,o1: mmi,o2 bezier does not have cell | 2.438397 | 2 |
cats/types.py | AdamBrianBright/cats-python | 2 | 8374 | <filename>cats/types.py
from pathlib import Path
from types import GeneratorType
from typing import AsyncIterable, Iterable, TypeAlias
import ujson
from cats.errors import MalformedHeadersError
try:
from django.db.models import QuerySet, Model
except ImportError:
QuerySet = type('QuerySet', (list,), {})
Model = type('Model', (list,), {})
__all__ = [
'Bytes',
'BytesGen',
'BytesAsyncGen',
'BytesAnyGen',
'Byte',
'Json',
'File',
'List',
'Missing',
'MISSING',
'QuerySet',
'Model',
'T_Headers',
'Headers',
]
Bytes: TypeAlias = bytes | bytearray | memoryview
BytesGen: TypeAlias = Iterable[Bytes]
BytesAsyncGen: TypeAlias = AsyncIterable[Bytes]
BytesAnyGen: TypeAlias = BytesGen | BytesAsyncGen
Byte: TypeAlias = Bytes
Json: TypeAlias = str | int | float | dict | list | bool | None
File: TypeAlias = Path | str
List = list | tuple | set | GeneratorType | QuerySet
class Missing(str):
"""
Custom Missing type is required for Pydantic to work properly. IDK
"""
__slots__ = ()
def __init__(self):
super().__init__()
def __eq__(self, other):
return isinstance(other, Missing)
def __bool__(self):
return False
MISSING = Missing()
class Headers(dict):
__slots__ = ()
def __init__(self, *args, **kwargs):
v = self._convert(*args, **kwargs)
if (offset := v.get('offset', None)) and (not isinstance(offset, int) or offset < 0):
raise MalformedHeadersError('Invalid offset header', headers=v)
super().__init__(v)
@classmethod
def _key(cls, key: str) -> str:
return key.replace(' ', '-').title()
def __getitem__(self, item):
return super().__getitem__(self._key(item))
def __setitem__(self, key, value):
return super().__setitem__(self._key(key), value)
def __delitem__(self, key):
return super().__delitem__(self._key(key))
def __contains__(self, item):
return super().__contains__(self._key(item))
@classmethod
def _convert(cls, *args, **kwargs):
return {cls._key(k): v for k, v in dict(*args, **kwargs).items() if isinstance(k, str)}
def update(self, *args, **kwargs) -> None:
super().update(self._convert(*args, **kwargs))
def encode(self) -> bytes:
return ujson.dumps(self, ensure_ascii=False, escape_forward_slashes=False).encode('utf-8')
@classmethod
def decode(cls, headers: Bytes) -> 'Headers':
try:
headers = ujson.loads(headers)
except ValueError: # + UnicodeDecodeError
headers = None
return cls(headers or {})
T_Headers: TypeAlias = Headers | dict[str]
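# --- Editor's note: illustrative round-trip sketch, not part of the original module. Keys
# are normalised to Title-Case-With-Dashes and encode()/decode() go through JSON:
#
#     h = Headers({'content type': 'json', 'offset': 10})
#     assert 'Content-Type' in h and h['content-type'] == 'json'
#     assert Headers.decode(h.encode()) == h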
| <filename>cats/types.py
from pathlib import Path
from types import GeneratorType
from typing import AsyncIterable, Iterable, TypeAlias
import ujson
from cats.errors import MalformedHeadersError
try:
from django.db.models import QuerySet, Model
except ImportError:
QuerySet = type('QuerySet', (list,), {})
Model = type('Model', (list,), {})
__all__ = [
'Bytes',
'BytesGen',
'BytesAsyncGen',
'BytesAnyGen',
'Byte',
'Json',
'File',
'List',
'Missing',
'MISSING',
'QuerySet',
'Model',
'T_Headers',
'Headers',
]
Bytes: TypeAlias = bytes | bytearray | memoryview
BytesGen: TypeAlias = Iterable[Bytes]
BytesAsyncGen: TypeAlias = AsyncIterable[Bytes]
BytesAnyGen: TypeAlias = BytesGen | BytesAsyncGen
Byte: TypeAlias = Bytes
Json: TypeAlias = str | int | float | dict | list | bool | None
File: TypeAlias = Path | str
List = list | tuple | set | GeneratorType | QuerySet
class Missing(str):
"""
Custom Missing type is required for Pydantic to work properly. IDK
"""
__slots__ = ()
def __init__(self):
super().__init__()
def __eq__(self, other):
return isinstance(other, Missing)
def __bool__(self):
return False
MISSING = Missing()
class Headers(dict):
__slots__ = ()
def __init__(self, *args, **kwargs):
v = self._convert(*args, **kwargs)
if (offset := v.get('offset', None)) and (not isinstance(offset, int) or offset < 0):
raise MalformedHeadersError('Invalid offset header', headers=v)
super().__init__(v)
@classmethod
def _key(cls, key: str) -> str:
return key.replace(' ', '-').title()
def __getitem__(self, item):
return super().__getitem__(self._key(item))
def __setitem__(self, key, value):
return super().__setitem__(self._key(key), value)
def __delitem__(self, key):
return super().__delitem__(self._key(key))
def __contains__(self, item):
return super().__contains__(self._key(item))
@classmethod
def _convert(cls, *args, **kwargs):
return {cls._key(k): v for k, v in dict(*args, **kwargs).items() if isinstance(k, str)}
def update(self, *args, **kwargs) -> None:
super().update(self._convert(*args, **kwargs))
def encode(self) -> bytes:
return ujson.dumps(self, ensure_ascii=False, escape_forward_slashes=False).encode('utf-8')
@classmethod
def decode(cls, headers: Bytes) -> 'Headers':
try:
headers = ujson.loads(headers)
except ValueError: # + UnicodeDecodeError
headers = None
return cls(headers or {})
T_Headers: TypeAlias = Headers | dict[str]
| en | 0.776727 | Custom Missing type is required for Pydantic to work properly. IDK # + UnicodeDecodeError | 2.392644 | 2 |
raven/utils/urlparse.py | MyCollege/raven | 0 | 8375 | <gh_stars>0
from __future__ import absolute_import
try:
import urlparse as _urlparse
except ImportError:
from urllib import parse as _urlparse
def register_scheme(scheme):
for method in filter(lambda s: s.startswith('uses_'), dir(_urlparse)):
uses = getattr(_urlparse, method)
if scheme not in uses:
uses.append(scheme)
urlparse = _urlparse.urlparse
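# --- Editor's note: illustrative usage added by the editor, not part of the original file.
# Registering a custom scheme makes the stdlib apply its http-style joining and query rules
# to URLs using that scheme:
#
#     register_scheme('redis')
#     parts = urlparse('redis://localhost:6379/0')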
| from __future__ import absolute_import
try:
import urlparse as _urlparse
except ImportError:
from urllib import parse as _urlparse
def register_scheme(scheme):
for method in filter(lambda s: s.startswith('uses_'), dir(_urlparse)):
uses = getattr(_urlparse, method)
if scheme not in uses:
uses.append(scheme)
urlparse = _urlparse.urlparse | none | 1 | 2.277474 | 2 |
|
setup.py | stjordanis/MONeT-1 | 161 | 8376 | <filename>setup.py
import setuptools
setuptools.setup(
name="monet_memory_optimized_training",
version="0.0.1",
description="Memory Optimized Network Training Framework",
url="https://github.com/philkr/lowrank_conv",
packages=setuptools.find_packages(include = ['monet', 'monet.*', 'models', 'checkmate', 'gist']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
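# --- Editor's note (illustrative, not part of the original file): a package defined this way
# is typically installed for development with ``pip install -e .`` run from the repository root.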
| <filename>setup.py
import setuptools
setuptools.setup(
name="monet_memory_optimized_training",
version="0.0.1",
description="Memory Optimized Network Training Framework",
url="https://github.com/philkr/lowrank_conv",
packages=setuptools.find_packages(include = ['monet', 'monet.*', 'models', 'checkmate', 'gist']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| none | 1 | 1.227586 | 1 |
|
Tests/Methods/Machine/test_Magnet_Type_11_meth.py | Superomeg4/pyleecan | 2 | 8377 | # -*- coding: utf-8 -*-
"""
@date Created on Thu Dec 18 13:56:33 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
from unittest import TestCase
from ddt import ddt, data
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.MagnetType11 import MagnetType11
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotMPolar import SlotMPolar
from numpy import pi, exp, angle, array
from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface
Mag11_test = list()
# Internal Slot surface
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=0, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=1, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 0.78539616, "Ao": pi / 4, "H_exp": 1})
# Internal Slot inset
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=40e-3, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=20e-3, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 7.3827e-3, "Ao": pi / 4, "H_exp": 20e-3})
# Outward Slot inset
lam = LamSlotMag(is_internal=False, Rext=0.1325)
lam.slot = SlotMPolar(H0=5e-3, W0=pi / 10, Zs=8)
lam.slot.magnet = [MagnetType11(Hmag=8e-3, Wmag=pi / 12)]
Mag11_test.append({"test_obj": lam, "S_exp": 2.09439e-6, "Ao": pi / 12, "H_exp": 8e-3})
# For AlmostEqual
DELTA = 1e-4
@ddt
class test_Magnet_Type_11_meth(TestCase):
"""unittest for MagnetType11 methods
"""
@data(*Mag11_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
# Compare numerical and analytical results
b = comp_surface(test_obj.slot.magnet[0])
msg = "Analytical: " + str(a) + " Numerical " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_angle_op(self, test_dict):
"""Check that the computation of the opening angle is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_angle_opening()
a = result
b = test_dict["Ao"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
def test_build_geometry_out(self):
"""check that curve_list is correct (outwards magnet)"""
lam = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=False,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (40e-3 + 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (40e-3 + 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z - 0.2) * exp(1j * angle(Z1))
Z4 = (Z - 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
def test_build_geometry_in(self):
"""check that curve_list is correct (inwards magnet)"""
lam = LamSlotMag(
Rint=40e-1,
Rext=90e-1,
is_internal=True,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (90e-1 - 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (90e-1 - 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z + 0.2) * exp(1j * angle(Z1))
Z4 = (Z + 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
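# --- Editor's note: illustrative run command, not part of the original test module. The
# ddt-parametrised cases above can be collected by a standard test runner, e.g.:
#
#     pytest Tests/Methods/Machine/test_Magnet_Type_11_meth.py -v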
| # -*- coding: utf-8 -*-
"""
@date Created on Thu Dec 18 13:56:33 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
from unittest import TestCase
from ddt import ddt, data
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.MagnetType11 import MagnetType11
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotMPolar import SlotMPolar
from numpy import pi, exp, angle, array
from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface
Mag11_test = list()
# Internal Slot surface
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=0, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=1, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 0.78539616, "Ao": pi / 4, "H_exp": 1})
# Internal Slot inset
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=40e-3, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=20e-3, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 7.3827e-3, "Ao": pi / 4, "H_exp": 20e-3})
# Outward Slot inset
lam = LamSlotMag(is_internal=False, Rext=0.1325)
lam.slot = SlotMPolar(H0=5e-3, W0=pi / 10, Zs=8)
lam.slot.magnet = [MagnetType11(Hmag=8e-3, Wmag=pi / 12)]
Mag11_test.append({"test_obj": lam, "S_exp": 2.09439e-6, "Ao": pi / 12, "H_exp": 8e-3})
# For AlmostEqual
DELTA = 1e-4
@ddt
class test_Magnet_Type_11_meth(TestCase):
"""unittest for MagnetType11 methods
"""
@data(*Mag11_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
# Compare numerical and analytical results
b = comp_surface(test_obj.slot.magnet[0])
msg = "Analytical: " + str(a) + " Numerical " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_angle_op(self, test_dict):
"""Check that the computation of the opening angle is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_angle_opening()
a = result
b = test_dict["Ao"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
def test_build_geometry_out(self):
"""check that curve_list is correct (outwards magnet)"""
lam = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=False,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (40e-3 + 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (40e-3 + 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z - 0.2) * exp(1j * angle(Z1))
Z4 = (Z - 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
def test_build_geometry_in(self):
"""check that curve_list is correct (inwards magnet)"""
lam = LamSlotMag(
Rint=40e-1,
Rext=90e-1,
is_internal=True,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (90e-1 - 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (90e-1 - 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z + 0.2) * exp(1j * angle(Z1))
Z4 = (Z + 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
| en | 0.817221 | # -*- coding: utf-8 -*- @date Created on Thu Dec 18 13:56:33 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. @author pierre_b # Internal Slot surface # Internal Slot inset # Outward Slot inset # For AlmostEqual unittest for MagnetType11 methods Check that the computation of the surface is correct # Compare numerical and analytical results Check that the computation of the height is correct Check that the computation of the opening angle is correct check that curve_list is correct (outwards magnet) # # Creation of curve check that curve_list is correct (inwards magnet) # # Creation of curve | 2.012276 | 2 |
tomo_encoders/tasks/void_mapping.py | arshadzahangirchowdhury/TomoEncoders | 0 | 8378 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from operator import mod
from tomo_encoders.misc.voxel_processing import modified_autocontrast, TimerGPU
from tomo_encoders.reconstruction.recon import recon_patches_3d
import cupy as cp
import numpy as np
from skimage.filters import threshold_otsu
from tomo_encoders import Grid
def get_values_cyl_mask(vol, mask_fac):
vol_shape = vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
return vol[cyl > 0]
def cylindrical_mask(out_vol, mask_fac, mask_val = 0):
vol_shape = out_vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
out_vol[cyl == 0] = mask_val
return
def segment_otsu(vol, s = 0.05):
'''segment volume with otsu'''
timer = TimerGPU()
timer.tic()
tmp_values = vol[::4,::4,::4].get()
# rec_min_max = modified_autocontrast(tmp_values, s = s, normalize_sampling_factor=1)
thresh = cp.float32(threshold_otsu(tmp_values.reshape(-1)))
vol = (vol < thresh).astype(cp.uint8)
timer.toc("otsu thresholding")
return vol
def edge_map(Y):
'''
this algorithm was inspired by: https://github.com/tomochallenge/tomochallenge_utils/blob/master/foam_phantom_utils.py
'''
msk = cp.zeros_like(Y)
tmp = Y[:-1]!=Y[1:]
msk[:-1][tmp] = 1
msk[1:][tmp] = 1
tmp = Y[:,:-1]!=Y[:,1:]
msk[:,:-1][tmp] = 1
msk[:,1:][tmp] = 1
tmp = Y[:,:,:-1]!=Y[:,:,1:]
msk[:,:,:-1][tmp] = 1
msk[:,:,1:][tmp] = 1
return msk > 0
def guess_surface(V_bin, b, wd):
# find patches on surface
wdb = int(wd//b)
p3d = Grid(V_bin.shape, width = wdb)
x = p3d.extract(V_bin)
is_surf = (np.std(x, axis = (1,2,3)) > 0.0)
is_ones = (np.sum(x, axis = (1,2,3))/(wdb**3) == 1)
is_zeros = (np.sum(x, axis = (1,2,3))/(wdb**3) == 0)
p3d = p3d.rescale(b)
p3d_surf = p3d.filter_by_condition(is_surf)
p3d_ones = p3d.filter_by_condition(is_ones)
p3d_zeros = p3d.filter_by_condition(is_zeros)
eff = len(p3d_surf)*(wd**3)/np.prod(p3d_surf.vol_shape)
print(f"\tSTAT: r value: {eff*100.0:.2f}")
return p3d_surf, p3d_ones, p3d_zeros
def process_patches(projs, theta, center, fe, p_surf, min_max, TIMEIT = False):
    # SCHEME 1: integrate reconstruction and segmentation (segments data on the gpu itself)
# st_proc = cp.cuda.Event(); end_proc = cp.cuda.Event(); st_proc.record()
# x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
# apply_fbp = True, segmenter = fe, \
# segmenter_batch_size = 256)
# end_proc.record(); end_proc.synchronize(); t_surf = cp.cuda.get_elapsed_time(st_proc,end_proc)
# SCHEME 2: reconstruct and segment separately (copies rec data from gpu to cpu)
st_rec = cp.cuda.Event(); end_rec = cp.cuda.Event(); st_rec.record()
x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
apply_fbp =True)
end_rec.record(); end_rec.synchronize(); t_rec = cp.cuda.get_elapsed_time(st_rec,end_rec)
st_seg = cp.cuda.Event(); end_seg = cp.cuda.Event(); st_seg.record()
x_surf = np.clip(x_surf, *min_max)
x_surf = fe.predict_patches("segmenter", x_surf[...,np.newaxis], 256, None, min_max = min_max)[...,0]
end_seg.record(); end_seg.synchronize(); t_seg = cp.cuda.get_elapsed_time(st_seg,end_seg)
print(f'\tTIME: local reconstruction - {t_rec/1000.0:.2f} secs')
print(f'\tTIME: local segmentation - {t_seg/1000.0:.2f} secs')
print(f'\tSTAT: total patches in neighborhood: {len(p_surf)}')
if TIMEIT:
return x_surf, p_surf, t_rec, t_seg
else:
return x_surf, p_surf
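# --- Editor's note: hypothetical end-to-end sketch, not part of the original module. The
# volume name and mask fraction are placeholders; a CUDA GPU with CuPy is assumed.
#
#     V = cp.asarray(recon_volume, dtype=cp.float32)   # reconstructed volume, shape (nz, n, n)
#     V_seg = segment_otsu(V)                          # binary void/material volume on the GPU
#     cylindrical_mask(V_seg, 0.95, mask_val=0)        # keep only the cylindrical field of view
#     # guess_surface() can then propose patches on the void/material boundary, and
#     # process_patches() reconstructs and segments just those local patches.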
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from operator import mod
from tomo_encoders.misc.voxel_processing import modified_autocontrast, TimerGPU
from tomo_encoders.reconstruction.recon import recon_patches_3d
import cupy as cp
import numpy as np
from skimage.filters import threshold_otsu
from tomo_encoders import Grid
def get_values_cyl_mask(vol, mask_fac):
vol_shape = vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
return vol[cyl > 0]
def cylindrical_mask(out_vol, mask_fac, mask_val = 0):
vol_shape = out_vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
out_vol[cyl == 0] = mask_val
return
def segment_otsu(vol, s = 0.05):
'''segment volume with otsu'''
timer = TimerGPU()
timer.tic()
tmp_values = vol[::4,::4,::4].get()
# rec_min_max = modified_autocontrast(tmp_values, s = s, normalize_sampling_factor=1)
thresh = cp.float32(threshold_otsu(tmp_values.reshape(-1)))
vol = (vol < thresh).astype(cp.uint8)
timer.toc("otsu thresholding")
return vol
def edge_map(Y):
'''
this algorithm was inspired by: https://github.com/tomochallenge/tomochallenge_utils/blob/master/foam_phantom_utils.py
'''
msk = cp.zeros_like(Y)
tmp = Y[:-1]!=Y[1:]
msk[:-1][tmp] = 1
msk[1:][tmp] = 1
tmp = Y[:,:-1]!=Y[:,1:]
msk[:,:-1][tmp] = 1
msk[:,1:][tmp] = 1
tmp = Y[:,:,:-1]!=Y[:,:,1:]
msk[:,:,:-1][tmp] = 1
msk[:,:,1:][tmp] = 1
return msk > 0
def guess_surface(V_bin, b, wd):
# find patches on surface
wdb = int(wd//b)
p3d = Grid(V_bin.shape, width = wdb)
x = p3d.extract(V_bin)
is_surf = (np.std(x, axis = (1,2,3)) > 0.0)
is_ones = (np.sum(x, axis = (1,2,3))/(wdb**3) == 1)
is_zeros = (np.sum(x, axis = (1,2,3))/(wdb**3) == 0)
p3d = p3d.rescale(b)
p3d_surf = p3d.filter_by_condition(is_surf)
p3d_ones = p3d.filter_by_condition(is_ones)
p3d_zeros = p3d.filter_by_condition(is_zeros)
eff = len(p3d_surf)*(wd**3)/np.prod(p3d_surf.vol_shape)
print(f"\tSTAT: r value: {eff*100.0:.2f}")
return p3d_surf, p3d_ones, p3d_zeros
def process_patches(projs, theta, center, fe, p_surf, min_max, TIMEIT = False):
    # SCHEME 1: integrate reconstruction and segmentation (segments data on the gpu itself)
# st_proc = cp.cuda.Event(); end_proc = cp.cuda.Event(); st_proc.record()
# x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
# apply_fbp = True, segmenter = fe, \
# segmenter_batch_size = 256)
# end_proc.record(); end_proc.synchronize(); t_surf = cp.cuda.get_elapsed_time(st_proc,end_proc)
# SCHEME 2: reconstruct and segment separately (copies rec data from gpu to cpu)
st_rec = cp.cuda.Event(); end_rec = cp.cuda.Event(); st_rec.record()
x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
apply_fbp =True)
end_rec.record(); end_rec.synchronize(); t_rec = cp.cuda.get_elapsed_time(st_rec,end_rec)
st_seg = cp.cuda.Event(); end_seg = cp.cuda.Event(); st_seg.record()
x_surf = np.clip(x_surf, *min_max)
x_surf = fe.predict_patches("segmenter", x_surf[...,np.newaxis], 256, None, min_max = min_max)[...,0]
end_seg.record(); end_seg.synchronize(); t_seg = cp.cuda.get_elapsed_time(st_seg,end_seg)
print(f'\tTIME: local reconstruction - {t_rec/1000.0:.2f} secs')
print(f'\tTIME: local segmentation - {t_seg/1000.0:.2f} secs')
print(f'\tSTAT: total patches in neighborhood: {len(p_surf)}')
if TIMEIT:
return x_surf, p_surf, t_rec, t_seg
else:
return x_surf, p_surf
| en | 0.515202 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # inside is positive # inside is positive segment volume with otsu # rec_min_max = modified_autocontrast(tmp_values, s = s, normalize_sampling_factor=1) this algorithm was inspired by: https://github.com/tomochallenge/tomochallenge_utils/blob/master/foam_phantom_utils.py # find patches on surface # SCHEME 1: integrate reconstruction and segmention (segments data on gpu itself) # st_proc = cp.cuda.Event(); end_proc = cp.cuda.Event(); st_proc.record() # x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \ # apply_fbp = True, segmenter = fe, \ # segmenter_batch_size = 256) # end_proc.record(); end_proc.synchronize(); t_surf = cp.cuda.get_elapsed_time(st_proc,end_proc) # SCHEME 2: reconstruct and segment separately (copies rec data from gpu to cpu) | 2.089592 | 2 |
handypackages/subscribe/migrations/0001_initial.py | roundium/handypackages | 1 | 8379 | # Generated by Django 2.2.1 on 2019-06-22 11:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SubscribeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(db_index=True, max_length=255, unique=True, verbose_name='Email')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Subscribe Time')),
],
options={
'verbose_name': 'Subscribe Email',
'verbose_name_plural': 'Subscribe Emails',
'abstract': False,
},
),
]
| # Generated by Django 2.2.1 on 2019-06-22 11:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SubscribeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(db_index=True, max_length=255, unique=True, verbose_name='Email')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Subscribe Time')),
],
options={
'verbose_name': 'Subscribe Email',
'verbose_name_plural': 'Subscribe Emails',
'abstract': False,
},
),
]
| en | 0.746472 | # Generated by Django 2.2.1 on 2019-06-22 11:03 | 1.806068 | 2 |
TuShare/view/sh_margins.py | lwh2015/TuShare | 1 | 8380 | # -*- coding: UTF-8 -*-
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import tushare as ts
from .publiceClass import DateEncoder
@csrf_exempt
def sh_margins(request):
try:
        start = request.POST.get('start','')  # optional
        end = request.POST.get('end','')  # optional
data = ts.sh_margins(start,end)
res = {'columns':[
'信用交易日期',
'本日融资余额(元)',
'本日融资买入额(元)',
'本日融券余量',
'本日融券余量金额(元)',
'本日融券卖出量',
'本日融资融券余额(元)'
],'data':json.loads(json.dumps(data.values,cls=DateEncoder))}
    except BaseException as error:
        return HttpResponse(error)
else:
return HttpResponse(json.dumps(res),content_type="application/json")
| # -*- coding: UTF-8 -*-
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import tushare as ts
from .publiceClass import DateEncoder
@csrf_exempt
def sh_margins(request):
try:
        start = request.POST.get('start','')  # optional
        end = request.POST.get('end','')  # optional
data = ts.sh_margins(start,end)
res = {'columns':[
'信用交易日期',
'本日融资余额(元)',
'本日融资买入额(元)',
'本日融券余量',
'本日融券余量金额(元)',
'本日融券卖出量',
'本日融资融券余额(元)'
],'data':json.loads(json.dumps(data.values,cls=DateEncoder))}
    except BaseException as error:
        return HttpResponse(error)
else:
return HttpResponse(json.dumps(res),content_type="application/json")
| zh | 0.266282 | # -*- coding: UTF-8 -*- #选填 #选填 | 2.05571 | 2 |
intermediate/classes/camera.py | robertob45/learning-python | 0 | 8381 | class Camera:
"""docstring for ."""
def __init__(self, brand, sensor, lens, battery):
self.brand = brand
self.sensor = sensor
self.lens = lens
self.battery = battery
def __str__(self):
return self.brand + ' ' + self.sensor + ' ' + self.lens + ' ' + self.battery
def focus(self):
print('Focusing using', self.lens, '...')
print('')
def frame(self):
print('Move until your subject is in the desired position')
print('.')
print('.')
print('.')
def flash(self, flash_use):
if flash_use == 's':
print('Shooting with flash...')
else:
print('Shooting without flash...')
print('')
def format(self, save_format):
if save_format == 'jpg':
print('Saving in: ' + save_format)
elif save_format == 'raw':
print('Saving in: ' + save_format)
else:
print('No valid format to save')
def take_picture(self, save_format, flash_use):
print('Say cheese!')
self.focus()
self.frame()
self.flash(flash_use)
self.format(save_format)
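# A minimal usage sketch of the class above; the constructor arguments are
# placeholder values, not taken from any original project code.
if __name__ == '__main__':
    demo_camera = Camera('ExampleBrand', 'APS-C sensor', '50mm f/1.8', 'battery pack')
    print(demo_camera)
    # 'jpg' selects the save format handled by format(); 's' enables the flash path.
    demo_camera.take_picture('jpg', 's')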
| class Camera:
"""docstring for ."""
def __init__(self, brand, sensor, lens, battery):
self.brand = brand
self.sensor = sensor
self.lens = lens
self.battery = battery
def __str__(self):
return self.brand + ' ' + self.sensor + ' ' + self.lens + ' ' + self.battery
def focus(self):
print('Focusing using', self.lens, '...')
print('')
def frame(self):
print('Move until your subject is in the desired position')
print('.')
print('.')
print('.')
def flash(self, flash_use):
if flash_use == 's':
print('Shooting with flash...')
else:
print('Shooting without flash...')
print('')
def format(self, save_format):
if save_format == 'jpg':
print('Saving in: ' + save_format)
elif save_format == 'raw':
print('Saving in: ' + save_format)
else:
print('No valid format to save')
def take_picture(self, save_format, flash_use):
print('Say cheese!')
self.focus()
self.frame()
self.flash(flash_use)
self.format(save_format)
| en | 0.659294 | docstring for . | 3.751097 | 4 |
dbaas/tsuru/tests/test_service_add.py | didindinn/database-as-a-service | 0 | 8382 | <filename>dbaas/tsuru/tests/test_service_add.py
from mock import patch, MagicMock
from django.contrib.auth.models import User
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.datastructures import MultiValueDictKeyError
from account.models import Role, Team, Organization
from physical.tests.factory import EnvironmentFactory, PlanFactory
from physical.models import Plan
class ValidationTestCase(TestCase):
"""HTTP test cases for the tsuru Service Add. This class focuses on
validations of POST
"""
USERNAME = "fake_user"
PASSWORD = "<PASSWORD>"
def setUp(self):
self.role = Role.objects.get_or_create(name="fake_role")[0]
self.organization = Organization.objects.get_or_create(
name='fake_organization'
)[0]
self.team = Team.objects.get_or_create(
name="fake_team", role=self.role,
organization=self.organization)[0]
self.superuser = User.objects.create_superuser(
self.USERNAME,
email="{}<EMAIL>".<EMAIL>(self.<EMAIL>),
password=self.PASSWORD
)
self.team.users.add(self.superuser)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.env = 'dev'
self.environment = EnvironmentFactory.create(name=self.env)
self.url = reverse('tsuru:service-add', args=(self.env,))
self.name = 'fake_database'
self.user = "{}<EMAIL>".format(self.USERNAME)
self.description = 'fake desc'
self.plan = PlanFactory(name='fake_plan', provider=Plan.CLOUDSTACK)
self.plan.environments.add(self.environment)
self.plan_name = 'fake-plan-dev'
def tearDown(self):
self.client.logout()
def _assert_resp(self, resp, msg):
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, msg)
def test_name_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(self.url, {})
def test_user_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name}
)
def test_team_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name, 'user': self.user}
)
def test_description_fail(self):
resp = self.client.post(
self.url,
{'name': self.name, 'user': self.user, 'team': self.team}
)
self._assert_resp(resp, '"A description must be provided."')
def test_name_fail(self):
resp = self.client.post(
self.url,
{
'name': '99invalid-name',
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"Your database name must match /^[a-z][a-z0-9_]+$/ ."'
)
@patch('tsuru.views.Database.objects.get', new=MagicMock())
def test_database_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"There is already a database called fake_database in dev."'
)
@patch(
'tsuru.views.database_name_evironment_constraint',
new=MagicMock(return_value=True)
)
def test_already_exist_database_with_name(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"fake_database already exists in env dev!"'
)
def test_user_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': '<EMAIL>',
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_team_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': '<EMAIL>',
'description': self.description,
'team': 'team_not_found'
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_env_not_found(self):
self.url = self.url.replace(
'/{}/'.format(self.env),
'/env_not_found/'
)
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Environment does not exist."'
)
@patch(
'tsuru.views.Team.count_databases_in_use',
new=MagicMock(return_value=99)
)
def test_allocation_limit(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
('"The database alocation limit of 2 has been exceeded for the '
'selected team: fake_team"')
)
def test_plan_not_on_payload(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
def test_plan_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': 'not found'
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
@patch('notification.tasks.TaskRegister.create_task', new=MagicMock())
@patch('notification.tasks.create_database_with_retry')
def test_call_database_create(self, create_database_mock):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': self.plan_name
}
)
self.assertTrue(create_database_mock.called)
self.assertEqual(resp.status_code, 201)
| <filename>dbaas/tsuru/tests/test_service_add.py
from mock import patch, MagicMock
from django.contrib.auth.models import User
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.datastructures import MultiValueDictKeyError
from account.models import Role, Team, Organization
from physical.tests.factory import EnvironmentFactory, PlanFactory
from physical.models import Plan
class ValidationTestCase(TestCase):
"""HTTP test cases for the tsuru Service Add. This class focuses on
validations of POST
"""
USERNAME = "fake_user"
PASSWORD = "<PASSWORD>"
def setUp(self):
self.role = Role.objects.get_or_create(name="fake_role")[0]
self.organization = Organization.objects.get_or_create(
name='fake_organization'
)[0]
self.team = Team.objects.get_or_create(
name="fake_team", role=self.role,
organization=self.organization)[0]
self.superuser = User.objects.create_superuser(
self.USERNAME,
email="{}<EMAIL>".<EMAIL>(self.<EMAIL>),
password=self.PASSWORD
)
self.team.users.add(self.superuser)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.env = 'dev'
self.environment = EnvironmentFactory.create(name=self.env)
self.url = reverse('tsuru:service-add', args=(self.env,))
self.name = 'fake_database'
self.user = "{}<EMAIL>".format(self.USERNAME)
self.description = 'fake desc'
self.plan = PlanFactory(name='fake_plan', provider=Plan.CLOUDSTACK)
self.plan.environments.add(self.environment)
self.plan_name = 'fake-plan-dev'
def tearDown(self):
self.client.logout()
def _assert_resp(self, resp, msg):
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, msg)
def test_name_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(self.url, {})
def test_user_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name}
)
def test_team_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name, 'user': self.user}
)
def test_description_fail(self):
resp = self.client.post(
self.url,
{'name': self.name, 'user': self.user, 'team': self.team}
)
self._assert_resp(resp, '"A description must be provided."')
def test_name_fail(self):
resp = self.client.post(
self.url,
{
'name': '99invalid-name',
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"Your database name must match /^[a-z][a-z0-9_]+$/ ."'
)
@patch('tsuru.views.Database.objects.get', new=MagicMock())
def test_database_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"There is already a database called fake_database in dev."'
)
@patch(
'tsuru.views.database_name_evironment_constraint',
new=MagicMock(return_value=True)
)
def test_already_exist_database_with_name(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"fake_database already exists in env dev!"'
)
def test_user_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': '<EMAIL>',
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_team_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': '<EMAIL>',
'description': self.description,
'team': 'team_not_found'
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_env_not_found(self):
self.url = self.url.replace(
'/{}/'.format(self.env),
'/env_not_found/'
)
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Environment does not exist."'
)
@patch(
'tsuru.views.Team.count_databases_in_use',
new=MagicMock(return_value=99)
)
def test_allocation_limit(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
('"The database alocation limit of 2 has been exceeded for the '
'selected team: fake_team"')
)
def test_plan_not_on_payload(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
def test_plan_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': 'not found'
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
@patch('notification.tasks.TaskRegister.create_task', new=MagicMock())
@patch('notification.tasks.create_database_with_retry')
def test_call_database_create(self, create_database_mock):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': self.plan_name
}
)
self.assertTrue(create_database_mock.called)
self.assertEqual(resp.status_code, 201)
| en | 0.781346 | HTTP test cases for the tsuru Service Add. This class focuses on validations of POST | 2.301665 | 2 |
Main/migrations/0072_auto_20210506_0016.py | Muhammet-Yildiz/Ecommerce_Website-HepsiOrada | 10 | 8383 | # Generated by Django 3.1.4 on 2021-05-05 21:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Main', '0071_auto_20210506_0004'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='chooseColor',
),
migrations.RemoveField(
model_name='product',
name='chooseSize',
),
]
| # Generated by Django 3.1.4 on 2021-05-05 21:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Main', '0071_auto_20210506_0004'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='chooseColor',
),
migrations.RemoveField(
model_name='product',
name='chooseSize',
),
]
| en | 0.805638 | # Generated by Django 3.1.4 on 2021-05-05 21:16 | 1.531207 | 2 |
1.py | zweed4u/dailycodingproblem | 0 | 8384 | #!/usr/bin/python3
"""
Good morning! Here's your coding interview problem for today.
This problem was recently asked by Google.
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Bonus: Can you do this in one pass?
"""
def func(l, k):
sums = []
for index, element in enumerate(l):
print(f'Current element: {element}')
if index == 0:
# first element - need another
print()
continue
for num in range(index):
print(f'Appending {l[index]} + {l[num]}')
sums.append(l[num] + l[index])
print()
print(sums)
return k in sums
print(func([10, 15, 3, 7], 17))
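# A minimal sketch of the "one pass" bonus asked for in the docstring above:
# keep the values seen so far in a set and check whether the complement k - x
# has already appeared. The helper name func_one_pass is an added example, not the author's.
def func_one_pass(l, k):
    seen = set()
    for element in l:
        if k - element in seen:
            return True
        seen.add(element)
    return False

print(func_one_pass([10, 15, 3, 7], 17))  # True, since 10 + 7 == 17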
| #!/usr/bin/python3
"""
Good morning! Here's your coding interview problem for today.
This problem was recently asked by Google.
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Bonus: Can you do this in one pass?
"""
def func(l, k):
sums = []
for index, element in enumerate(l):
print(f'Current element: {element}')
if index == 0:
# first element - need another
print()
continue
for num in range(index):
print(f'Appending {l[index]} + {l[num]}')
sums.append(l[num] + l[index])
print()
print(sums)
return k in sums
print(func([10, 15, 3, 7], 17))
| en | 0.897816 | #!/usr/bin/python3 Good morning! Here's your coding interview problem for today. This problem was recently asked by Google. Given a list of numbers and a number k, return whether any two numbers from the list add up to k. For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17. Bonus: Can you do this in one pass? # first element - need another | 3.883942 | 4 |
gryphon/data/template_scaffolding/template/setup.py | ow-gryphon/gryphon | 0 | 8385 | import json
import setuptools
with open("template/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as fr:
requirements = fr.read().strip().split('\n')
with open('metadata.json') as fr:
metadata = json.load(fr)
setuptools.setup(
name="", # Name of the repository
version="0.0.1",
author=metadata.get("author", ""),
author_email=metadata.get("author_email", ""),
description=metadata.get("description", ""),
long_description=long_description,
long_description_content_type="text/markdown",
url="", # Repository URL or externally maintained page
packages=setuptools.find_packages(),
python_requires='>=3.6',
install_requires=requirements,
)
| import json
import setuptools
with open("template/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as fr:
requirements = fr.read().strip().split('\n')
with open('metadata.json') as fr:
metadata = json.load(fr)
setuptools.setup(
name="", # Name of the repository
version="0.0.1",
author=metadata.get("author", ""),
author_email=metadata.get("author_email", ""),
description=metadata.get("description", ""),
long_description=long_description,
long_description_content_type="text/markdown",
url="", # Repository URL or externally maintained page
packages=setuptools.find_packages(),
python_requires='>=3.6',
install_requires=requirements,
)
| en | 0.52538 | # Name of the repository # Repository URL or externally maintained page | 1.817157 | 2 |
train_base3.py | Mhaiyang/iccv | 2 | 8386 | """
@Time : 201/21/19 10:41
@Author : TaylorMei
@Email : <EMAIL>
@Project : iccv
@File : train_base3.py
@Function:
"""
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
from tqdm import tqdm
import joint_transforms
from config import msd_training_root
from config import backbone_path
from dataset import ImageFolder
from misc import AvgMeter, check_mkdir
from model.base3 import BASE3
import loss as L
cudnn.benchmark = True
device_ids = [2]
ckpt_path = './ckpt'
exp_name = 'BASE3'
args = {
'epoch_num': 100,
'train_batch_size': 14,
'last_epoch': 0,
'lr': 5e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': '',
'scale': 384,
'save_point': [60, 80, 90],
'add_graph': True,
'poly_train': True,
'optimizer': 'SGD'
}
# Path.
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
vis_path = os.path.join(ckpt_path, exp_name, 'log')
check_mkdir(vis_path)
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
writer = SummaryWriter(log_dir=vis_path, comment=exp_name)
# Transform Data.
joint_transform = joint_transforms.Compose([
joint_transforms.RandomRotate(),
joint_transforms.Resize((args['scale'], args['scale']))
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # maybe can optimized.
])
target_transform = transforms.ToTensor()
# Prepare Data Set.
train_set = ImageFolder(msd_training_root, joint_transform, img_transform, target_transform)
print("Train set: {}".format(train_set.__len__()))
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True)
def main():
print(args)
print(exp_name)
net = BASE3(backbone_path).cuda(device_ids[0]).train()
if args['add_graph']:
writer.add_graph(net, input_to_model=torch.rand(
args['train_batch_size'], 3, args['scale'], args['scale']).cuda(device_ids[0]))
if args['optimizer'] == 'Adam':
print("Adam")
optimizer = optim.Adam([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
])
else:
print("SGD")
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('Training Resumes From \'%s\'' % args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net = nn.DataParallel(net, device_ids=device_ids)
print("Using {} GPU(s) to Train.".format(len(device_ids)))
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
writer.close()
def train(net, optimizer):
curr_iter = 1
for epoch in range(args['last_epoch'] + 1, args['last_epoch'] + 1 + args['epoch_num']):
loss_4_record, loss_3_record, loss_2_record, loss_1_record, \
loss_f_record, loss_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
train_iterator = tqdm(train_loader, total=len(train_loader))
for data in train_iterator:
if args['poly_train']:
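# Polynomial ("poly") LR schedule selected by args['poly_train']: the base LR is
# scaled by (1 - curr_iter / total_iters) ** args['lr_decay'], so it decays
# smoothly towards 0 over training; bias parameters keep twice the base LR.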
base_lr = args['lr'] * (1 - float(curr_iter) / (args['epoch_num'] * len(train_loader))) ** args[
'lr_decay']
optimizer.param_groups[0]['lr'] = 2 * base_lr
optimizer.param_groups[1]['lr'] = 1 * base_lr
inputs, labels = data
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda(device_ids[0])
labels = Variable(labels).cuda(device_ids[0])
optimizer.zero_grad()
predict_4, predict_3, predict_2, predict_1, predict_f = net(inputs)
loss_4 = L.lovasz_hinge(predict_4, labels)
loss_3 = L.lovasz_hinge(predict_3, labels)
loss_2 = L.lovasz_hinge(predict_2, labels)
loss_1 = L.lovasz_hinge(predict_1, labels)
loss_f = L.lovasz_hinge(predict_f, labels)
loss = loss_4 + loss_3 + loss_2 + loss_1 + loss_f
loss.backward()
optimizer.step()
loss_record.update(loss.data, batch_size)
loss_4_record.update(loss_4.data, batch_size)
loss_3_record.update(loss_3.data, batch_size)
loss_2_record.update(loss_2.data, batch_size)
loss_1_record.update(loss_1.data, batch_size)
loss_f_record.update(loss_f.data, batch_size)
if curr_iter % 50 == 0:
writer.add_scalar('loss', loss, curr_iter)
writer.add_scalar('loss_4', loss_4, curr_iter)
writer.add_scalar('loss_3', loss_3, curr_iter)
writer.add_scalar('loss_2', loss_2, curr_iter)
writer.add_scalar('loss_1', loss_1, curr_iter)
writer.add_scalar('loss_f', loss_f, curr_iter)
log = '[%3d], [%6d], [%.6f], [%.5f], [L4: %.5f], [L3: %.5f], [L2: %.5f], [L1: %.5f], [Lf: %.5f]' % \
(epoch, curr_iter, base_lr, loss_record.avg, loss_4_record.avg, loss_3_record.avg, loss_2_record.avg,
loss_1_record.avg, loss_f_record.avg)
train_iterator.set_description(log)
open(log_path, 'a').write(log + '\n')
curr_iter += 1
if epoch in args['save_point']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
net.cuda(device_ids[0])
if epoch >= args['epoch_num']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
print("Optimization Have Done!")
return
if __name__ == '__main__':
main()
| """
@Time : 201/21/19 10:41
@Author : TaylorMei
@Email : <EMAIL>
@Project : iccv
@File : train_base3.py
@Function:
"""
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
from tqdm import tqdm
import joint_transforms
from config import msd_training_root
from config import backbone_path
from dataset import ImageFolder
from misc import AvgMeter, check_mkdir
from model.base3 import BASE3
import loss as L
cudnn.benchmark = True
device_ids = [2]
ckpt_path = './ckpt'
exp_name = 'BASE3'
args = {
'epoch_num': 100,
'train_batch_size': 14,
'last_epoch': 0,
'lr': 5e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': '',
'scale': 384,
'save_point': [60, 80, 90],
'add_graph': True,
'poly_train': True,
'optimizer': 'SGD'
}
# Path.
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
vis_path = os.path.join(ckpt_path, exp_name, 'log')
check_mkdir(vis_path)
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
writer = SummaryWriter(log_dir=vis_path, comment=exp_name)
# Transform Data.
joint_transform = joint_transforms.Compose([
joint_transforms.RandomRotate(),
joint_transforms.Resize((args['scale'], args['scale']))
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # maybe can optimized.
])
target_transform = transforms.ToTensor()
# Prepare Data Set.
train_set = ImageFolder(msd_training_root, joint_transform, img_transform, target_transform)
print("Train set: {}".format(train_set.__len__()))
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True)
def main():
print(args)
print(exp_name)
net = BASE3(backbone_path).cuda(device_ids[0]).train()
if args['add_graph']:
writer.add_graph(net, input_to_model=torch.rand(
args['train_batch_size'], 3, args['scale'], args['scale']).cuda(device_ids[0]))
if args['optimizer'] == 'Adam':
print("Adam")
optimizer = optim.Adam([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
])
else:
print("SGD")
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('Training Resumes From \'%s\'' % args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net = nn.DataParallel(net, device_ids=device_ids)
print("Using {} GPU(s) to Train.".format(len(device_ids)))
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
writer.close()
def train(net, optimizer):
curr_iter = 1
for epoch in range(args['last_epoch'] + 1, args['last_epoch'] + 1 + args['epoch_num']):
loss_4_record, loss_3_record, loss_2_record, loss_1_record, \
loss_f_record, loss_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
train_iterator = tqdm(train_loader, total=len(train_loader))
for data in train_iterator:
if args['poly_train']:
base_lr = args['lr'] * (1 - float(curr_iter) / (args['epoch_num'] * len(train_loader))) ** args[
'lr_decay']
optimizer.param_groups[0]['lr'] = 2 * base_lr
optimizer.param_groups[1]['lr'] = 1 * base_lr
inputs, labels = data
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda(device_ids[0])
labels = Variable(labels).cuda(device_ids[0])
optimizer.zero_grad()
predict_4, predict_3, predict_2, predict_1, predict_f = net(inputs)
loss_4 = L.lovasz_hinge(predict_4, labels)
loss_3 = L.lovasz_hinge(predict_3, labels)
loss_2 = L.lovasz_hinge(predict_2, labels)
loss_1 = L.lovasz_hinge(predict_1, labels)
loss_f = L.lovasz_hinge(predict_f, labels)
loss = loss_4 + loss_3 + loss_2 + loss_1 + loss_f
loss.backward()
optimizer.step()
loss_record.update(loss.data, batch_size)
loss_4_record.update(loss_4.data, batch_size)
loss_3_record.update(loss_3.data, batch_size)
loss_2_record.update(loss_2.data, batch_size)
loss_1_record.update(loss_1.data, batch_size)
loss_f_record.update(loss_f.data, batch_size)
if curr_iter % 50 == 0:
writer.add_scalar('loss', loss, curr_iter)
writer.add_scalar('loss_4', loss_4, curr_iter)
writer.add_scalar('loss_3', loss_3, curr_iter)
writer.add_scalar('loss_2', loss_2, curr_iter)
writer.add_scalar('loss_1', loss_1, curr_iter)
writer.add_scalar('loss_f', loss_f, curr_iter)
log = '[%3d], [%6d], [%.6f], [%.5f], [L4: %.5f], [L3: %.5f], [L2: %.5f], [L1: %.5f], [Lf: %.5f]' % \
(epoch, curr_iter, base_lr, loss_record.avg, loss_4_record.avg, loss_3_record.avg, loss_2_record.avg,
loss_1_record.avg, loss_f_record.avg)
train_iterator.set_description(log)
open(log_path, 'a').write(log + '\n')
curr_iter += 1
if epoch in args['save_point']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
net.cuda(device_ids[0])
if epoch >= args['epoch_num']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
print("Optimization Have Done!")
return
if __name__ == '__main__':
main()
| en | 0.484448 | @Time : 201/21/19 10:41 @Author : TaylorMei @Email : <EMAIL> @Project : iccv @File : train_base3.py @Function: # Path. # Transform Data. # maybe can optimized. # Prepare Data Set. | 1.749567 | 2 |
tests/test_comment.py | uwase-diane/min_pitch | 1 | 8387 | import unittest
from app.models import Comment, Pitch
from app import db
class TestPitchComment(unittest.TestCase):
def setUp(self):
self.new_pitch = Pitch(post = "doit", category='Quotes')
self.new_comment = Comment(comment = "good comment", pitch=self.new_pitch)
def test_instance(self):
self.assertTrue(isinstance(self.new_comment,Comment))
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment,"good comment")
self.assertEquals(self.new_comment.pitch,self.new_pitch, 'do it') | import unittest
from app.models import Comment, Pitch
from app import db
class TestPitchComment(unittest.TestCase):
def setUp(self):
self.new_pitch = Pitch(post = "doit", category='Quotes')
self.new_comment = Comment(comment = "good comment", pitch=self.new_pitch)
def test_instance(self):
self.assertTrue(isinstance(self.new_comment,Comment))
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment,"good comment")
self.assertEquals(self.new_comment.pitch,self.new_pitch, 'do it') | none | 1 | 3.247295 | 3 |
|
teacher/views.py | itteamforslp/safelife_project | 0 | 8388 | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.template import loader
from django.db import connection
from django.http import HttpResponseRedirect
import datetime
from django.http import JsonResponse
from administrator.models import Course, CourseTeacher, CourseStudent, Student
from django.core.exceptions import PermissionDenied
def teacher_only(function):
#"""Limit view to teacher only."""
def _inner(request, *args, **kwargs):
if request.user.is_staff and not request.user.is_superuser:  # only non-staff (teacher) accounts and superusers may proceed
raise PermissionDenied
return function(request, *args, **kwargs)
return _inner
@login_required(login_url = '/users')
@teacher_only
def home(request):
current_user = request.user.id
teacher_current_courses = Course.objects.select_related().raw('SELECT * '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id AND C.is_complete = 0 ', [current_user])
currentdate = datetime.datetime.today().strftime('%Y-%m-%d')
with connection.cursor() as cursor:
cursor.execute('SELECT CL.course_id, CL.date '
'FROM classes as CL, course_teachers as CT '
'WHERE CT.teachers_id = %s AND CL.date >= %s '
'AND CT.course_id = CL.course_id '
'GROUP BY CL.course_id ', [current_user, currentdate])
next_class_date = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT CS.course_id, COUNT(CS.students_id) '
'FROM course_teachers as CT, course_students as CS '
'WHERE CT.teachers_id = %s AND CT.course_id = CS.course_id '
'GROUP BY CS.course_id ', [current_user])
teacher_student_count = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT C.course_id, C.notes '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id '
'GROUP BY CT.course_id ', [current_user])
teacher_course_notes = cursor.fetchall()
template = loader.get_template('teacher/dashboard.html')
context = {
'teacher_current_courses': teacher_current_courses,
'teacher_student_count': teacher_student_count,
'next_class_date': next_class_date,
'teacher_course_notes': teacher_course_notes
}
# Render the template to the user
return HttpResponse(template.render(context, request))
@csrf_exempt
def update_course_notes(request):
# Get the student name that was passed from the web page
courseNotes = request.POST.get('courseNotes')
courseId = request.POST.get('courseId')
# Create a cursor to execute raw SQL queries.
with connection.cursor() as cursor:
cursor.execute('UPDATE courses '
'SET notes = %s '
'WHERE course_id = %s', [courseNotes, courseId])
# Render the response to the user
| from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.template import loader
from django.db import connection
from django.http import HttpResponseRedirect
import datetime
from django.http import JsonResponse
from administrator.models import Course, CourseTeacher, CourseStudent, Student
from django.core.exceptions import PermissionDenied
def teacher_only(function):
#"""Limit view to teacher only."""
def _inner(request, *args, **kwargs):
if request.user.is_staff and not request.user.is_superuser:  # only non-staff (teacher) accounts and superusers may proceed
raise PermissionDenied
return function(request, *args, **kwargs)
return _inner
@login_required(login_url = '/users')
@teacher_only
def home(request):
current_user = request.user.id
teacher_current_courses = Course.objects.select_related().raw('SELECT * '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id AND C.is_complete = 0 ', [current_user])
currentdate = datetime.datetime.today().strftime('%Y-%m-%d')
with connection.cursor() as cursor:
cursor.execute('SELECT CL.course_id, CL.date '
'FROM classes as CL, course_teachers as CT '
'WHERE CT.teachers_id = %s AND CL.date >= %s '
'AND CT.course_id = CL.course_id '
'GROUP BY CL.course_id ', [current_user, currentdate])
next_class_date = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT CS.course_id, COUNT(CS.students_id) '
'FROM course_teachers as CT, course_students as CS '
'WHERE CT.teachers_id = %s AND CT.course_id = CS.course_id '
'GROUP BY CS.course_id ', [current_user])
teacher_student_count = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT C.course_id, C.notes '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id '
'GROUP BY CT.course_id ', [current_user])
teacher_course_notes = cursor.fetchall()
template = loader.get_template('teacher/dashboard.html')
context = {
'teacher_current_courses': teacher_current_courses,
'teacher_student_count': teacher_student_count,
'next_class_date': next_class_date,
'teacher_course_notes': teacher_course_notes
}
# Render the template to the user
return HttpResponse(template.render(context, request))
@csrf_exempt
def update_course_notes(request):
# Get the student name that was passed from the web page
courseNotes = request.POST.get('courseNotes')
courseId = request.POST.get('courseId')
# Create a cursor to execute raw SQL queries.
with connection.cursor() as cursor:
cursor.execute('UPDATE courses '
'SET notes = %s '
'WHERE course_id = %s', [courseNotes, courseId])
# Render the response to the user
| en | 0.91899 | #"""Limit view to teacher only.""" # Render the template to the user # Get the student name that was passed from the web page # Create a cursor to execute raw SQL queries. # Render the response to the user | 2.054544 | 2 |
botstory/middlewares/text/text_test.py | botstory/bot-story | 5 | 8389 | import logging
import pytest
import re
from . import text
from ... import matchers
from ...utils import answer, SimpleTrigger
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_should_run_story_on_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_not_run_story_on_non_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('buy!')
assert not trigger.is_triggered
@pytest.mark.asyncio
async def test_should_catch_any_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_ignore_any_non_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.location('some where')
assert not trigger.is_triggered
def test_serialize_text_any():
m_old = text.Any()
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Any)
@pytest.mark.asyncio
async def test_should_catch_equal_text_message():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Equal('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.Equal('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('see you!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_equal_handle_should_create_right_type():
assert isinstance(text.Equal.handle(''), text.Equal)
def test_serialize_text_equal():
m_old = text.Equal('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Equal)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_equal_text_message_case_in_sensitive():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.EqualCaseIgnore('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.EqualCaseIgnore('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('See You!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_serialize_text_equal_case_ignore():
m_old = text.EqualCaseIgnore('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.EqualCaseIgnore)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex():
trigger_buy = SimpleTrigger()
trigger_sell = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('buy (.*)btc'))
def one_story():
@story.part()
def then(ctx):
trigger_buy.receive(text.get_text(ctx)['matches'][0])
@story.on(text.Match('sell (.*)btc'))
def another_story():
@story.part()
def then(ctx):
trigger_sell.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('buy 700btc')
await talk.pure_text('sell 600btc')
assert trigger_buy.result() == '700'
assert trigger_sell.result() == '600'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex_with_flags():
trigger_destination = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
logger.debug('ctx')
logger.debug(ctx)
trigger_destination.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('Going to Pripyat')
assert trigger_destination.result() == 'Pripyat'
@pytest.mark.asyncio
async def test_should_not_fail_on_empty_message():
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
pass
await talk.ask(None)
def test_serialize_text_match():
m_old = text.Match('hello (.*)', re.IGNORECASE)
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Match)
assert m_new.matcher.match('Hello Piter!')
def test_text_qual_should_handle_text():
assert isinstance(matchers.get_validator('just pure text'), text.Equal)
| import logging
import pytest
import re
from . import text
from ... import matchers
from ...utils import answer, SimpleTrigger
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_should_run_story_on_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_not_run_story_on_non_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('buy!')
assert not trigger.is_triggered
@pytest.mark.asyncio
async def test_should_catch_any_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_ignore_any_non_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.location('some where')
assert not trigger.is_triggered
def test_serialize_text_any():
m_old = text.Any()
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Any)
@pytest.mark.asyncio
async def test_should_catch_equal_text_message():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Equal('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.Equal('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('see you!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_equal_handle_should_create_right_type():
assert isinstance(text.Equal.handle(''), text.Equal)
def test_serialize_text_equal():
m_old = text.Equal('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Equal)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_equal_text_message_case_in_sensitive():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.EqualCaseIgnore('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.EqualCaseIgnore('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('See You!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_serialize_text_equal_case_ignore():
m_old = text.EqualCaseIgnore('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.EqualCaseIgnore)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex():
trigger_buy = SimpleTrigger()
trigger_sell = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('buy (.*)btc'))
def one_story():
@story.part()
def then(ctx):
trigger_buy.receive(text.get_text(ctx)['matches'][0])
@story.on(text.Match('sell (.*)btc'))
def another_story():
@story.part()
def then(ctx):
trigger_sell.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('buy 700btc')
await talk.pure_text('sell 600btc')
assert trigger_buy.result() == '700'
assert trigger_sell.result() == '600'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex_with_flags():
trigger_destination = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
logger.debug('ctx')
logger.debug(ctx)
trigger_destination.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('Going to Pripyat')
assert trigger_destination.result() == 'Pripyat'
@pytest.mark.asyncio
async def test_should_not_fail_on_empty_message():
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
pass
await talk.ask(None)
def test_serialize_text_match():
m_old = text.Match('hello (.*)', re.IGNORECASE)
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Match)
assert m_new.matcher.match('Hello Piter!')
def test_text_qual_should_handle_text():
assert isinstance(matchers.get_validator('just pure text'), text.Equal)
| none | 1 | 2.229999 | 2 |
|
pywikibot/site/_datasite.py | xqt/pwb | 0 | 8390 | <gh_stars>0
"""Objects representing API interface to Wikibase site."""
#
# (C) Pywikibot team, 2012-2022
#
# Distributed under the terms of the MIT license.
#
import datetime
import json
import uuid
from contextlib import suppress
from typing import Optional
from warnings import warn
import pywikibot
from pywikibot.data import api
from pywikibot.exceptions import (
APIError,
EntityTypeUnknownError,
IsRedirectPageError,
NoPageError,
NoWikibaseEntityError,
)
from pywikibot.site._apisite import APISite
from pywikibot.site._decorators import need_extension, need_right, need_version
from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args
__all__ = ('DataSite', )
class DataSite(APISite):
"""Wikibase data capable site."""
def __init__(self, *args, **kwargs) -> None:
"""Initializer."""
super().__init__(*args, **kwargs)
self._item_namespace = None
self._property_namespace = None
self._type_to_class = {
'item': pywikibot.ItemPage,
'property': pywikibot.PropertyPage,
'mediainfo': pywikibot.MediaInfo,
'lexeme': pywikibot.LexemePage,
'form': pywikibot.LexemeForm,
'sense': pywikibot.LexemeSense,
}
def _cache_entity_namespaces(self) -> None:
"""Find namespaces for each known wikibase entity type."""
self._entity_namespaces = {}
for entity_type in self._type_to_class:
for namespace in self.namespaces.values():
if not hasattr(namespace, 'defaultcontentmodel'):
continue
content_model = namespace.defaultcontentmodel
if content_model == ('wikibase-' + entity_type):
self._entity_namespaces[entity_type] = namespace
break
def get_namespace_for_entity_type(self, entity_type):
"""
Return namespace for given entity type.
:return: corresponding namespace
:rtype: Namespace
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
if entity_type in self._entity_namespaces:
return self._entity_namespaces[entity_type]
raise EntityTypeUnknownError(
'{!r} does not support entity type "{}" '
"or it doesn't have its own namespace"
.format(self, entity_type))
@property
def item_namespace(self):
"""
Return namespace for items.
:return: item namespace
:rtype: Namespace
"""
if self._item_namespace is None:
self._item_namespace = self.get_namespace_for_entity_type('item')
return self._item_namespace
@property
def property_namespace(self):
"""
Return namespace for properties.
:return: property namespace
:rtype: Namespace
"""
if self._property_namespace is None:
self._property_namespace = self.get_namespace_for_entity_type(
'property')
return self._property_namespace
def get_entity_for_entity_id(self, entity_id):
"""
Return a new instance for given entity id.
:raises pywikibot.exceptions.NoWikibaseEntityError: there is no entity
with the id
:return: a WikibaseEntity subclass
:rtype: WikibaseEntity
"""
for cls in self._type_to_class.values():
if cls.is_valid_id(entity_id):
return cls(self, entity_id)
entity = pywikibot.page.WikibaseEntity(self, entity_id)
raise NoWikibaseEntityError(entity)
@property
@need_version('1.28-wmf.3')
def sparql_endpoint(self):
"""
Return the sparql endpoint url, if any has been set.
:return: sparql endpoint url
:rtype: str|None
"""
return self.siteinfo['general'].get('wikibase-sparql')
@property
@need_version('1.28-wmf.23')
def concept_base_uri(self):
"""
Return the base uri for concepts/entities.
:return: concept base uri
:rtype: str
"""
return self.siteinfo['general']['wikibase-conceptbaseuri']
def geo_shape_repository(self):
"""Return Site object for the geo-shapes repository e.g. commons."""
url = self.siteinfo['general'].get('wikibase-geoshapestoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def tabular_data_repository(self):
"""Return Site object for the tabular-datas repository e.g. commons."""
url = self.siteinfo['general'].get(
'wikibase-tabulardatastoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def loadcontent(self, identification, *props):
"""
Fetch the current content of a Wikibase item.
This is called loadcontent since
wbgetentities does not support fetching old
revisions. Eventually this will get replaced by
an actual loadrevisions.
:param identification: Parameters used to identify the page(s)
:type identification: dict
:param props: the optional properties to fetch.
"""
params = merge_unique_dicts(identification, action='wbgetentities',
# TODO: When props is empty it results in
# an empty string ('&props=') but it should
# result in a missing entry.
props=props if props else False)
req = self.simple_request(**params)
data = req.submit()
if 'success' not in data:
raise APIError(data['errors'], '')
return data['entities']
def preload_entities(self, pagelist, groupsize: int = 50):
"""
Yield subclasses of WikibaseEntity with content prefilled.
Note that pages will be iterated in a different order
than in the underlying pagelist.
:param pagelist: an iterable that yields either WikibaseEntity objects,
or Page objects linked to an ItemPage.
:param groupsize: how many pages to query at a time
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
for sublist in itergroup(pagelist, groupsize):
req = {'ids': [], 'titles': [], 'sites': []}
for p in sublist:
if isinstance(p, pywikibot.page.WikibaseEntity):
ident = p._defined_by()
for key in ident:
req[key].append(ident[key])
else:
if p.site == self and p.namespace() in (
self._entity_namespaces.values()):
req['ids'].append(p.title(with_ns=False))
else:
assert p.site.has_data_repository, \
'Site must have a data repository'
req['sites'].append(p.site.dbName())
req['titles'].append(p._link._text)
req = self.simple_request(action='wbgetentities', **req)
data = req.submit()
for entity in data['entities']:
if 'missing' in data['entities'][entity]:
continue
cls = self._type_to_class[data['entities'][entity]['type']]
page = cls(self, entity)
# No api call is made because item._content is given
page._content = data['entities'][entity]
with suppress(IsRedirectPageError):
page.get() # cannot provide get_redirect=True (T145971)
yield page
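# Illustrative note (a sketch, not upstream documentation): a typical call is
#   repo = pywikibot.Site('wikidata', 'wikidata')
#   for item in repo.preload_entities([pywikibot.ItemPage(repo, 'Q42')]):
#       ...  # item._content is prefilled, so item.get() needs no extra API call
# 'Q42' is only a placeholder entity id.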
def getPropertyType(self, prop):
"""
Obtain the type of a property.
This is used specifically because we can cache
the value for a much longer time (near infinite).
"""
params = {'action': 'wbgetentities', 'ids': prop.getID(),
'props': 'datatype'}
expiry = datetime.timedelta(days=365 * 100)
# Store it for 100 years
req = self._request(expiry=expiry, parameters=params)
data = req.submit()
# the IDs returned from the API can be upper or lowercase, depending
# on the version. See bug T55894 for more information.
try:
dtype = data['entities'][prop.getID()]['datatype']
except KeyError:
dtype = data['entities'][prop.getID().lower()]['datatype']
return dtype
@need_right('edit')
def editEntity(self, entity, data, bot: bool = True, **kwargs):
"""
Edit entity.
Note: This method is unable to create entities other than 'item'
if dict with API parameters was passed to 'entity' parameter.
:param entity: Page to edit, or dict with API parameters
to use for entity identification
:type entity: WikibaseEntity or dict
:param data: data updates
:type data: dict
:param bot: Whether to mark the edit as a bot edit
:return: New entity data
:rtype: dict
"""
# this changes the reference to a new object
data = dict(data)
if isinstance(entity, pywikibot.page.WikibaseEntity):
params = entity._defined_by(singular=True)
if 'id' in params and params['id'] == '-1':
del params['id']
if not params:
params['new'] = entity.entity_type
data_for_new_entity = entity.get_data_for_new_entity()
data.update(data_for_new_entity)
else:
if 'id' in entity and entity['id'] == '-1':
del entity['id']
params = dict(entity)
if not params: # If no identification was provided
params['new'] = 'item'
params['action'] = 'wbeditentity'
if bot:
params['bot'] = 1
if 'baserevid' in kwargs and kwargs['baserevid']:
params['baserevid'] = kwargs['baserevid']
params['token'] = self.tokens['edit']
for arg in kwargs:
if arg in ['clear', 'summary']:
params[arg] = kwargs[arg]
elif arg != 'baserevid':
warn('Unknown wbeditentity parameter {} ignored'.format(arg),
UserWarning, 2)
params['data'] = json.dumps(data)
req = self.simple_request(**params)
return req.submit()
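# Illustrative sketch only (not upstream documentation): editEntity is normally
# reached through the page objects, e.g.
#   item = pywikibot.ItemPage(repo, 'Q42')   # 'Q42' is a placeholder id
#   repo.editEntity(item, {'labels': {'en': 'example label'}}, summary='demo edit')
# A plain dict may be passed instead of a page object, but then only items can
# be created, as the docstring above notes.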
@need_right('edit')
def addClaim(self, entity, claim, bot: bool = True, summary=None) -> None:
"""
Add a claim.
:param entity: Entity to modify
:type entity: WikibaseEntity
:param claim: Claim to be added
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
claim.snak = entity.getID() + '$' + str(uuid.uuid4())
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'baserevid': entity.latest_revision_id,
'summary': summary,
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
# Update the item
if claim.getID() in entity.claims:
entity.claims[claim.getID()].append(claim)
else:
entity.claims[claim.getID()] = [claim]
entity.latest_revision_id = data['pageinfo']['lastrevid']
@need_right('edit')
def changeClaimTarget(self, claim, snaktype: str = 'value',
bot: bool = True, summary=None):
"""
Set the claim target to the value of the provided claim target.
:param claim: The source of the claim target value
:type claim: pywikibot.Claim
:param snaktype: An optional snaktype ('value', 'novalue' or
'somevalue'). Default: 'value'
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaimvalue', 'claim': claim.snak,
'snaktype': snaktype, 'summary': summary, 'bot': bot,
'token': self.tokens['edit']}
if snaktype == 'value':
params['value'] = json.dumps(claim._formatValue())
params['baserevid'] = claim.on_item.latest_revision_id
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def save_claim(self, claim, summary=None, bot: bool = True):
"""
Save the whole claim to the wikibase site.
:param claim: The claim to save
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'token': self.tokens['edit'],
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
claim.on_item.latest_revision_id = data['pageinfo']['lastrevid']
return data
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editSource(self, claim, source,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a source.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the source to
:type claim: pywikibot.Claim
:param source: A Claim object to be used as a source
:type source: pywikibot.Claim
:param new: Whether to create a new one if the "source" already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a source.')
params = {'action': 'wbsetreference', 'statement': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot, 'token': self.tokens['edit']}
# build up the snak
if isinstance(source, list):
sources = source
else:
sources = [source]
snak = {}
for sourceclaim in sources:
datavalue = sourceclaim._formatDataValue()
valuesnaks = snak.get(sourceclaim.getID(), [])
valuesnaks.append({
'snaktype': 'value',
'property': sourceclaim.getID(),
'datavalue': datavalue,
})
snak[sourceclaim.getID()] = valuesnaks
# set the hash if the source should be changed.
# if present, all claims of one source have the same hash
if not new and hasattr(sourceclaim, 'hash'):
params['reference'] = sourceclaim.hash
params['snaks'] = json.dumps(snak)
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editQualifier(self, claim, qualifier,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a qualifier.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the qualifier to
:type claim: pywikibot.Claim
:param qualifier: A Claim object to be used as a qualifier
:type qualifier: pywikibot.Claim
:param new: Whether to create a new one if the "qualifier"
already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a qualifier.')
params = {'action': 'wbsetqualifier', 'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot}
if (not new and hasattr(qualifier, 'hash')
and qualifier.hash is not None):
params['snakhash'] = qualifier.hash
params['token'] = self.tokens['edit']
# build up the snak
if qualifier.getSnakType() == 'value':
params['value'] = json.dumps(qualifier._formatValue())
params['snaktype'] = qualifier.getSnakType()
params['property'] = qualifier.getID()
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeClaims(self, claims,
bot: bool = True,
summary: Optional[str] = None):
"""Remove claims.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claims: Claims to be removed
:type claims: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:type bot: bool
:param summary: Edit summary
:type summary: str
"""
# Check on_item for all additional claims
items = {claim.on_item for claim in claims if claim.on_item}
assert len(items) == 1
baserevid = items.pop().latest_revision_id
params = {
'action': 'wbremoveclaims', 'baserevid': baserevid,
'summary': summary,
'bot': bot,
'claim': '|'.join(claim.snak for claim in claims),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
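    # Hedged usage sketch: every claim passed in one call must belong to the same item,
    # since a single baserevid is sent (see the assert above). For example:
    #
    #     repo.removeClaims(item.claims['P31'], summary='remove P31 statements')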
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeSources(self, claim, sources,
bot: bool = True,
summary: Optional[str] = None):
"""Remove sources.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the sources from
:type claim: pywikibot.Claim
:param sources: A list of Claim objects that are sources
:type sources: list
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovereferences',
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot,
'statement': claim.snak,
'references': '|'.join(source.hash for source in sources),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def remove_qualifiers(self, claim, qualifiers,
bot: bool = True,
summary: Optional[str] = None):
"""Remove qualifiers.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the qualifier from
:type claim: pywikibot.Claim
        :param qualifiers: Claim objects currently used as qualifiers
:type qualifiers: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovequalifiers',
'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
'qualifiers': [qualifier.hash for qualifier in qualifiers],
'token': self.tokens['edit']
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def linkTitles(self, page1, page2, bot: bool = True):
"""
Link two pages together.
:param page1: First page to link
:type page1: pywikibot.Page
:param page2: Second page to link
:type page2: pywikibot.Page
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wblinktitles',
'tosite': page1.site.dbName(),
'totitle': page1.title(),
'fromsite': page2.site.dbName(),
'fromtitle': page2.title(),
'token': self.tokens['edit']
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
@need_right('item-merge')
def mergeItems(self, from_item, to_item, ignore_conflicts=None,
summary=None, bot: bool = True):
"""
Merge two items together.
:param from_item: Item to merge from
:type from_item: pywikibot.ItemPage
:param to_item: Item to merge into
:type to_item: pywikibot.ItemPage
:param ignore_conflicts: Which type of conflicts
('description', 'sitelink', and 'statement')
should be ignored
:type ignore_conflicts: list of str
:param summary: Edit summary
:type summary: str
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wbmergeitems',
'fromid': from_item.getID(),
'toid': to_item.getID(),
'ignoreconflicts': ignore_conflicts,
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
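    # Hedged usage sketch (item ids are illustrative):
    #
    #     repo.mergeItems(pywikibot.ItemPage(repo, 'Q111'),
    #                     pywikibot.ItemPage(repo, 'Q222'),
    #                     ignore_conflicts=['description'],
    #                     summary='merge duplicate items')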
@need_right('item-merge')
@need_extension('WikibaseLexeme')
def mergeLexemes(self, from_lexeme, to_lexeme, summary=None, *,
bot: bool = True) -> dict:
"""
Merge two lexemes together.
:param from_lexeme: Lexeme to merge from
:type from_lexeme: pywikibot.LexemePage
:param to_lexeme: Lexeme to merge into
:type to_lexeme: pywikibot.LexemePage
:param summary: Edit summary
:type summary: str
:keyword bot: Whether to mark the edit as a bot edit
:return: dict API output
"""
params = {
'action': 'wblmergelexemes',
'source': from_lexeme.getID(),
'target': to_lexeme.getID(),
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('item-redirect')
def set_redirect_target(self, from_item, to_item, bot: bool = True):
"""
Make a redirect to another item.
        :param to_item: target item of the redirect.
        :type to_item: pywikibot.ItemPage
        :param from_item: item to be redirected.
:type from_item: pywikibot.ItemPage
:param bot: Whether to mark the edit as a bot edit
"""
params = {
'action': 'wbcreateredirect',
'from': from_item.getID(),
'to': to_item.getID(),
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
return req.submit()
def search_entities(self, search: str, language: str,
total: Optional[int] = None, **kwargs):
"""
Search for pages or properties that contain the given text.
:param search: Text to find.
:param language: Language to search in.
:param total: Maximum number of pages to retrieve in total, or
None in case of no limit.
:return: 'search' list from API output.
:rtype: Generator
"""
lang_codes = self._paraminfo.parameter('wbsearchentities',
'language')['type']
if language not in lang_codes:
raise ValueError('Data site used does not support provided '
'language.')
if 'site' in kwargs:
if kwargs['site'].sitename != self.sitename:
raise ValueError('The site given in the kwargs is different.')
warn('search_entities should not get a site via kwargs.',
UserWarning, 2)
del kwargs['site']
parameters = dict(search=search, language=language, **kwargs)
gen = self._generator(api.APIGenerator,
type_arg='wbsearchentities',
data_name='search',
total=total, parameters=parameters)
return gen
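    # Hedged usage sketch: the generator yields the raw 'search' entries returned by
    # wbsearchentities (dicts with keys such as 'id' and 'label'):
    #
    #     for hit in repo.search_entities('Douglas Adams', 'en', total=5):
    #         print(hit['id'], hit.get('label'))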
@need_right('edit')
def _wbset_action(self, itemdef, action: str, action_data,
**kwargs) -> dict:
"""
Execute wbset{action} on a Wikibase entity.
Supported actions are:
wbsetaliases, wbsetdescription, wbsetlabel and wbsetsitelink
:param itemdef: Entity to modify or create
:type itemdef: str, WikibaseEntity or Page connected to such item
:param action: wbset{action} to perform:
'wbsetaliases', 'wbsetdescription', 'wbsetlabel', 'wbsetsitelink'
:param action_data: data to be used in API request, see API help
:type action_data: SiteLink or dict
wbsetaliases:
dict shall have the following structure:
{'language': value (str),
'add': list of language codes (str),
'remove': list of language codes (str),
'set' list of language codes (str)
}
            'add' and 'remove' are alternatives to 'set'
wbsetdescription and wbsetlabel:
dict shall have keys 'language', 'value'
wbsetsitelink:
dict shall have keys 'linksite', 'linktitle' and
optionally 'badges'
:keyword bot: Whether to mark the edit as a bot edit, default is True
:type bot: bool
:keyword tags: Change tags to apply with the edit
:type tags: list of str
:return: query result
:raises AssertionError, TypeError
"""
def format_sitelink(sitelink):
"""Convert SiteLink to a dict accepted by wbsetsitelink API."""
if isinstance(sitelink, pywikibot.page.SiteLink):
_dict = {
'linksite': sitelink._sitekey,
'linktitle': sitelink._rawtitle,
'badges': '|'.join([b.title() for b in sitelink.badges]),
}
else:
_dict = sitelink
return _dict
def prepare_data(action, data):
"""Prepare data as expected by API."""
if action == 'wbsetaliases':
res = data
keys = set(res)
assert keys < {'language', 'add', 'remove', 'set'}
assert 'language' in keys
assert ({'add', 'remove', 'set'} & keys)
                # 'add'/'remove' may not be combined with 'set'
                assert 'set' not in keys or not ({'add', 'remove'} & keys)
elif action in ('wbsetlabel', 'wbsetdescription'):
res = data
keys = set(res)
assert keys == {'language', 'value'}
elif action == 'wbsetsitelink':
res = format_sitelink(data)
keys = set(res)
assert keys >= {'linksite'}
assert keys <= {'linksite', 'linktitle', 'badges'}
else:
raise ValueError('Something has gone wrong ...')
return res
# Supported actions
assert action in ('wbsetaliases', 'wbsetdescription',
'wbsetlabel', 'wbsetsitelink'), \
'action {} not supported.'.format(action)
# prefer ID over (site, title)
if isinstance(itemdef, str):
itemdef = self.get_entity_for_entity_id(itemdef)
elif isinstance(itemdef, pywikibot.Page):
itemdef = pywikibot.ItemPage.fromPage(itemdef, lazy_load=True)
elif not isinstance(itemdef, pywikibot.page.WikibaseEntity):
raise TypeError('itemdef shall be str, WikibaseEntity or Page')
params = itemdef._defined_by(singular=True)
# TODO: support 'new'
baserevid = kwargs.pop(
'baserevid',
itemdef.latest_revision_id if 'id' in params else 0
)
params.update(
{'baserevid': baserevid,
'action': action,
'token': self.tokens['edit'],
'bot': kwargs.pop('bot', True),
})
params.update(prepare_data(action, action_data))
for arg in kwargs:
if arg in ['summary', 'tags']:
params[arg] = kwargs[arg]
else:
warn('Unknown parameter {} for action {}, ignored'
.format(arg, action), UserWarning, 2)
req = self.simple_request(**params)
data = req.submit()
return data
def wbsetaliases(self, itemdef, aliases, **kwargs):
"""
Set aliases for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetaliases', aliases, **kwargs)
def wbsetdescription(self, itemdef, description, **kwargs):
"""
Set description for a single Wikibase entity.
See self._wbset_action()
"""
return self._wbset_action(itemdef, 'wbsetdescription', description,
**kwargs)
def wbsetlabel(self, itemdef, label, **kwargs):
"""
Set label for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetlabel', label, **kwargs)
def wbsetsitelink(self, itemdef, sitelink, **kwargs):
"""
Set, remove or modify a sitelink on a Wikibase item.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetsitelink', sitelink, **kwargs)
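    # Hedged examples of action_data shapes accepted by the wrappers above
    # (language codes, titles and site keys are illustrative):
    #
    #     repo.wbsetlabel(item, {'language': 'en', 'value': 'Douglas Adams'})
    #     repo.wbsetaliases(item, {'language': 'en', 'add': ['DNA']})
    #     repo.wbsetsitelink(item, {'linksite': 'enwiki', 'linktitle': 'Douglas Adams'})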
@need_right('edit')
@need_extension('WikibaseLexeme')
def add_form(self, lexeme, form, *, bot: bool = True,
baserevid=None) -> dict:
"""
Add a form.
:param lexeme: Lexeme to modify
:type lexeme: pywikibot.LexemePage
:param form: Form to be added
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wbladdform',
'lexemeId': lexeme.getID(),
'data': json.dumps(form.toJSON()),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def remove_form(self, form, *, bot: bool = True, baserevid=None) -> dict:
"""
Remove a form.
:param form: Form to be removed
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wblremoveform',
'id': form.getID(),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def edit_form_elements(self, form, data, *, bot: bool = True,
baserevid=None) -> dict:
"""
Edit lexeme form elements.
:param form: Form
:type form: pywikibot.LexemeForm
:param data: data updates
:type data: dict
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
:return: New form data
"""
params = {
'action': 'wbleditformelements',
'formId': form.getID(),
'data': json.dumps(data),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
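    # Hedged sketch of a `data` payload for edit_form_elements, following the generic
    # Wikibase lexeme form JSON layout (values are illustrative):
    #
    #     {'representations': {'en': {'language': 'en', 'value': 'books'}},
    #      'grammaticalFeatures': ['Q146786']}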
| """Objects representing API interface to Wikibase site."""
#
# (C) Pywikibot team, 2012-2022
#
# Distributed under the terms of the MIT license.
#
import datetime
import json
import uuid
from contextlib import suppress
from typing import Optional
from warnings import warn
import pywikibot
from pywikibot.data import api
from pywikibot.exceptions import (
APIError,
EntityTypeUnknownError,
IsRedirectPageError,
NoPageError,
NoWikibaseEntityError,
)
from pywikibot.site._apisite import APISite
from pywikibot.site._decorators import need_extension, need_right, need_version
from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args
__all__ = ('DataSite', )
class DataSite(APISite):
"""Wikibase data capable site."""
def __init__(self, *args, **kwargs) -> None:
"""Initializer."""
super().__init__(*args, **kwargs)
self._item_namespace = None
self._property_namespace = None
self._type_to_class = {
'item': pywikibot.ItemPage,
'property': pywikibot.PropertyPage,
'mediainfo': pywikibot.MediaInfo,
'lexeme': pywikibot.LexemePage,
'form': pywikibot.LexemeForm,
'sense': pywikibot.LexemeSense,
}
def _cache_entity_namespaces(self) -> None:
"""Find namespaces for each known wikibase entity type."""
self._entity_namespaces = {}
for entity_type in self._type_to_class:
for namespace in self.namespaces.values():
if not hasattr(namespace, 'defaultcontentmodel'):
continue
content_model = namespace.defaultcontentmodel
if content_model == ('wikibase-' + entity_type):
self._entity_namespaces[entity_type] = namespace
break
def get_namespace_for_entity_type(self, entity_type):
"""
Return namespace for given entity type.
:return: corresponding namespace
:rtype: Namespace
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
if entity_type in self._entity_namespaces:
return self._entity_namespaces[entity_type]
raise EntityTypeUnknownError(
'{!r} does not support entity type "{}" '
"or it doesn't have its own namespace"
.format(self, entity_type))
@property
def item_namespace(self):
"""
Return namespace for items.
:return: item namespace
:rtype: Namespace
"""
if self._item_namespace is None:
self._item_namespace = self.get_namespace_for_entity_type('item')
return self._item_namespace
@property
def property_namespace(self):
"""
Return namespace for properties.
:return: property namespace
:rtype: Namespace
"""
if self._property_namespace is None:
self._property_namespace = self.get_namespace_for_entity_type(
'property')
return self._property_namespace
def get_entity_for_entity_id(self, entity_id):
"""
Return a new instance for given entity id.
:raises pywikibot.exceptions.NoWikibaseEntityError: there is no entity
with the id
:return: a WikibaseEntity subclass
:rtype: WikibaseEntity
"""
for cls in self._type_to_class.values():
if cls.is_valid_id(entity_id):
return cls(self, entity_id)
entity = pywikibot.page.WikibaseEntity(self, entity_id)
raise NoWikibaseEntityError(entity)
@property
@need_version('1.28-wmf.3')
def sparql_endpoint(self):
"""
Return the sparql endpoint url, if any has been set.
:return: sparql endpoint url
:rtype: str|None
"""
return self.siteinfo['general'].get('wikibase-sparql')
@property
@need_version('1.28-wmf.23')
def concept_base_uri(self):
"""
Return the base uri for concepts/entities.
:return: concept base uri
:rtype: str
"""
return self.siteinfo['general']['wikibase-conceptbaseuri']
def geo_shape_repository(self):
"""Return Site object for the geo-shapes repository e.g. commons."""
url = self.siteinfo['general'].get('wikibase-geoshapestoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def tabular_data_repository(self):
"""Return Site object for the tabular-datas repository e.g. commons."""
url = self.siteinfo['general'].get(
'wikibase-tabulardatastoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def loadcontent(self, identification, *props):
"""
Fetch the current content of a Wikibase item.
This is called loadcontent since
wbgetentities does not support fetching old
revisions. Eventually this will get replaced by
an actual loadrevisions.
:param identification: Parameters used to identify the page(s)
:type identification: dict
:param props: the optional properties to fetch.
"""
params = merge_unique_dicts(identification, action='wbgetentities',
# TODO: When props is empty it results in
# an empty string ('&props=') but it should
# result in a missing entry.
props=props if props else False)
req = self.simple_request(**params)
data = req.submit()
if 'success' not in data:
raise APIError(data['errors'], '')
return data['entities']
def preload_entities(self, pagelist, groupsize: int = 50):
"""
Yield subclasses of WikibaseEntity's with content prefilled.
Note that pages will be iterated in a different order
than in the underlying pagelist.
:param pagelist: an iterable that yields either WikibaseEntity objects,
or Page objects linked to an ItemPage.
:param groupsize: how many pages to query at a time
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
for sublist in itergroup(pagelist, groupsize):
req = {'ids': [], 'titles': [], 'sites': []}
for p in sublist:
if isinstance(p, pywikibot.page.WikibaseEntity):
ident = p._defined_by()
for key in ident:
req[key].append(ident[key])
else:
if p.site == self and p.namespace() in (
self._entity_namespaces.values()):
req['ids'].append(p.title(with_ns=False))
else:
assert p.site.has_data_repository, \
'Site must have a data repository'
req['sites'].append(p.site.dbName())
req['titles'].append(p._link._text)
req = self.simple_request(action='wbgetentities', **req)
data = req.submit()
for entity in data['entities']:
if 'missing' in data['entities'][entity]:
continue
cls = self._type_to_class[data['entities'][entity]['type']]
page = cls(self, entity)
# No api call is made because item._content is given
page._content = data['entities'][entity]
with suppress(IsRedirectPageError):
page.get() # cannot provide get_redirect=True (T145971)
yield page
def getPropertyType(self, prop):
"""
Obtain the type of a property.
This is used specifically because we can cache
the value for a much longer time (near infinite).
"""
params = {'action': 'wbgetentities', 'ids': prop.getID(),
'props': 'datatype'}
expiry = datetime.timedelta(days=365 * 100)
# Store it for 100 years
req = self._request(expiry=expiry, parameters=params)
data = req.submit()
# the IDs returned from the API can be upper or lowercase, depending
# on the version. See bug T55894 for more information.
try:
dtype = data['entities'][prop.getID()]['datatype']
except KeyError:
dtype = data['entities'][prop.getID().lower()]['datatype']
return dtype
@need_right('edit')
def editEntity(self, entity, data, bot: bool = True, **kwargs):
"""
Edit entity.
Note: This method is unable to create entities other than 'item'
if dict with API parameters was passed to 'entity' parameter.
:param entity: Page to edit, or dict with API parameters
to use for entity identification
:type entity: WikibaseEntity or dict
:param data: data updates
:type data: dict
:param bot: Whether to mark the edit as a bot edit
:return: New entity data
:rtype: dict
"""
# this changes the reference to a new object
data = dict(data)
if isinstance(entity, pywikibot.page.WikibaseEntity):
params = entity._defined_by(singular=True)
if 'id' in params and params['id'] == '-1':
del params['id']
if not params:
params['new'] = entity.entity_type
data_for_new_entity = entity.get_data_for_new_entity()
data.update(data_for_new_entity)
else:
if 'id' in entity and entity['id'] == '-1':
del entity['id']
params = dict(entity)
if not params: # If no identification was provided
params['new'] = 'item'
params['action'] = 'wbeditentity'
if bot:
params['bot'] = 1
if 'baserevid' in kwargs and kwargs['baserevid']:
params['baserevid'] = kwargs['baserevid']
params['token'] = self.tokens['edit']
for arg in kwargs:
if arg in ['clear', 'summary']:
params[arg] = kwargs[arg]
elif arg != 'baserevid':
warn('Unknown wbeditentity parameter {} ignored'.format(arg),
UserWarning, 2)
params['data'] = json.dumps(data)
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def addClaim(self, entity, claim, bot: bool = True, summary=None) -> None:
"""
Add a claim.
:param entity: Entity to modify
:type entity: WikibaseEntity
:param claim: Claim to be added
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
claim.snak = entity.getID() + '$' + str(uuid.uuid4())
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'baserevid': entity.latest_revision_id,
'summary': summary,
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
# Update the item
if claim.getID() in entity.claims:
entity.claims[claim.getID()].append(claim)
else:
entity.claims[claim.getID()] = [claim]
entity.latest_revision_id = data['pageinfo']['lastrevid']
@need_right('edit')
def changeClaimTarget(self, claim, snaktype: str = 'value',
bot: bool = True, summary=None):
"""
Set the claim target to the value of the provided claim target.
:param claim: The source of the claim target value
:type claim: pywikibot.Claim
:param snaktype: An optional snaktype ('value', 'novalue' or
'somevalue'). Default: 'value'
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaimvalue', 'claim': claim.snak,
'snaktype': snaktype, 'summary': summary, 'bot': bot,
'token': self.tokens['edit']}
if snaktype == 'value':
params['value'] = json.dumps(claim._formatValue())
params['baserevid'] = claim.on_item.latest_revision_id
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def save_claim(self, claim, summary=None, bot: bool = True):
"""
Save the whole claim to the wikibase site.
:param claim: The claim to save
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'token': self.tokens['edit'],
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
claim.on_item.latest_revision_id = data['pageinfo']['lastrevid']
return data
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editSource(self, claim, source,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a source.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the source to
:type claim: pywikibot.Claim
:param source: A Claim object to be used as a source
:type source: pywikibot.Claim
:param new: Whether to create a new one if the "source" already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a source.')
params = {'action': 'wbsetreference', 'statement': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot, 'token': self.tokens['edit']}
# build up the snak
if isinstance(source, list):
sources = source
else:
sources = [source]
snak = {}
for sourceclaim in sources:
datavalue = sourceclaim._formatDataValue()
valuesnaks = snak.get(sourceclaim.getID(), [])
valuesnaks.append({
'snaktype': 'value',
'property': sourceclaim.getID(),
'datavalue': datavalue,
})
snak[sourceclaim.getID()] = valuesnaks
# set the hash if the source should be changed.
# if present, all claims of one source have the same hash
if not new and hasattr(sourceclaim, 'hash'):
params['reference'] = sourceclaim.hash
params['snaks'] = json.dumps(snak)
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editQualifier(self, claim, qualifier,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a qualifier.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the qualifier to
:type claim: pywikibot.Claim
:param qualifier: A Claim object to be used as a qualifier
:type qualifier: pywikibot.Claim
:param new: Whether to create a new one if the "qualifier"
already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a qualifier.')
params = {'action': 'wbsetqualifier', 'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot}
if (not new and hasattr(qualifier, 'hash')
and qualifier.hash is not None):
params['snakhash'] = qualifier.hash
params['token'] = self.tokens['edit']
# build up the snak
if qualifier.getSnakType() == 'value':
params['value'] = json.dumps(qualifier._formatValue())
params['snaktype'] = qualifier.getSnakType()
params['property'] = qualifier.getID()
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeClaims(self, claims,
bot: bool = True,
summary: Optional[str] = None):
"""Remove claims.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claims: Claims to be removed
:type claims: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:type bot: bool
:param summary: Edit summary
:type summary: str
"""
# Check on_item for all additional claims
items = {claim.on_item for claim in claims if claim.on_item}
assert len(items) == 1
baserevid = items.pop().latest_revision_id
params = {
'action': 'wbremoveclaims', 'baserevid': baserevid,
'summary': summary,
'bot': bot,
'claim': '|'.join(claim.snak for claim in claims),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeSources(self, claim, sources,
bot: bool = True,
summary: Optional[str] = None):
"""Remove sources.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the sources from
:type claim: pywikibot.Claim
:param sources: A list of Claim objects that are sources
:type sources: list
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovereferences',
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot,
'statement': claim.snak,
'references': '|'.join(source.hash for source in sources),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def remove_qualifiers(self, claim, qualifiers,
bot: bool = True,
summary: Optional[str] = None):
"""Remove qualifiers.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the qualifier from
:type claim: pywikibot.Claim
:param qualifiers: Claim objects currently used as a qualifiers
:type qualifiers: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovequalifiers',
'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
'qualifiers': [qualifier.hash for qualifier in qualifiers],
'token': self.tokens['edit']
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def linkTitles(self, page1, page2, bot: bool = True):
"""
Link two pages together.
:param page1: First page to link
:type page1: pywikibot.Page
:param page2: Second page to link
:type page2: pywikibot.Page
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wblinktitles',
'tosite': page1.site.dbName(),
'totitle': page1.title(),
'fromsite': page2.site.dbName(),
'fromtitle': page2.title(),
'token': self.tokens['edit']
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
@need_right('item-merge')
def mergeItems(self, from_item, to_item, ignore_conflicts=None,
summary=None, bot: bool = True):
"""
Merge two items together.
:param from_item: Item to merge from
:type from_item: pywikibot.ItemPage
:param to_item: Item to merge into
:type to_item: pywikibot.ItemPage
:param ignore_conflicts: Which type of conflicts
('description', 'sitelink', and 'statement')
should be ignored
:type ignore_conflicts: list of str
:param summary: Edit summary
:type summary: str
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wbmergeitems',
'fromid': from_item.getID(),
'toid': to_item.getID(),
'ignoreconflicts': ignore_conflicts,
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
@need_right('item-merge')
@need_extension('WikibaseLexeme')
def mergeLexemes(self, from_lexeme, to_lexeme, summary=None, *,
bot: bool = True) -> dict:
"""
Merge two lexemes together.
:param from_lexeme: Lexeme to merge from
:type from_lexeme: pywikibot.LexemePage
:param to_lexeme: Lexeme to merge into
:type to_lexeme: pywikibot.LexemePage
:param summary: Edit summary
:type summary: str
:keyword bot: Whether to mark the edit as a bot edit
:return: dict API output
"""
params = {
'action': 'wblmergelexemes',
'source': from_lexeme.getID(),
'target': to_lexeme.getID(),
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('item-redirect')
def set_redirect_target(self, from_item, to_item, bot: bool = True):
"""
Make a redirect to another item.
:param to_item: title of target item.
:type to_item: pywikibot.ItemPage
:param from_item: Title of the item to be redirected.
:type from_item: pywikibot.ItemPage
:param bot: Whether to mark the edit as a bot edit
"""
params = {
'action': 'wbcreateredirect',
'from': from_item.getID(),
'to': to_item.getID(),
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
return req.submit()
def search_entities(self, search: str, language: str,
total: Optional[int] = None, **kwargs):
"""
Search for pages or properties that contain the given text.
:param search: Text to find.
:param language: Language to search in.
:param total: Maximum number of pages to retrieve in total, or
None in case of no limit.
:return: 'search' list from API output.
:rtype: Generator
"""
lang_codes = self._paraminfo.parameter('wbsearchentities',
'language')['type']
if language not in lang_codes:
raise ValueError('Data site used does not support provided '
'language.')
if 'site' in kwargs:
if kwargs['site'].sitename != self.sitename:
raise ValueError('The site given in the kwargs is different.')
warn('search_entities should not get a site via kwargs.',
UserWarning, 2)
del kwargs['site']
parameters = dict(search=search, language=language, **kwargs)
gen = self._generator(api.APIGenerator,
type_arg='wbsearchentities',
data_name='search',
total=total, parameters=parameters)
return gen
@need_right('edit')
def _wbset_action(self, itemdef, action: str, action_data,
**kwargs) -> dict:
"""
Execute wbset{action} on a Wikibase entity.
Supported actions are:
wbsetaliases, wbsetdescription, wbsetlabel and wbsetsitelink
:param itemdef: Entity to modify or create
:type itemdef: str, WikibaseEntity or Page connected to such item
:param action: wbset{action} to perform:
'wbsetaliases', 'wbsetdescription', 'wbsetlabel', 'wbsetsitelink'
:param action_data: data to be used in API request, see API help
:type action_data: SiteLink or dict
wbsetaliases:
dict shall have the following structure:
{'language': value (str),
'add': list of language codes (str),
'remove': list of language codes (str),
'set' list of language codes (str)
}
'add' and 'remove' are alternative to 'set'
wbsetdescription and wbsetlabel:
dict shall have keys 'language', 'value'
wbsetsitelink:
dict shall have keys 'linksite', 'linktitle' and
optionally 'badges'
:keyword bot: Whether to mark the edit as a bot edit, default is True
:type bot: bool
:keyword tags: Change tags to apply with the edit
:type tags: list of str
:return: query result
:raises AssertionError, TypeError
"""
def format_sitelink(sitelink):
"""Convert SiteLink to a dict accepted by wbsetsitelink API."""
if isinstance(sitelink, pywikibot.page.SiteLink):
_dict = {
'linksite': sitelink._sitekey,
'linktitle': sitelink._rawtitle,
'badges': '|'.join([b.title() for b in sitelink.badges]),
}
else:
_dict = sitelink
return _dict
def prepare_data(action, data):
"""Prepare data as expected by API."""
if action == 'wbsetaliases':
res = data
keys = set(res)
assert keys < {'language', 'add', 'remove', 'set'}
assert 'language' in keys
assert ({'add', 'remove', 'set'} & keys)
assert ({'add', 'set'} >= keys)
assert ({'remove', 'set'} >= keys)
elif action in ('wbsetlabel', 'wbsetdescription'):
res = data
keys = set(res)
assert keys == {'language', 'value'}
elif action == 'wbsetsitelink':
res = format_sitelink(data)
keys = set(res)
assert keys >= {'linksite'}
assert keys <= {'linksite', 'linktitle', 'badges'}
else:
raise ValueError('Something has gone wrong ...')
return res
# Supported actions
assert action in ('wbsetaliases', 'wbsetdescription',
'wbsetlabel', 'wbsetsitelink'), \
'action {} not supported.'.format(action)
# prefer ID over (site, title)
if isinstance(itemdef, str):
itemdef = self.get_entity_for_entity_id(itemdef)
elif isinstance(itemdef, pywikibot.Page):
itemdef = pywikibot.ItemPage.fromPage(itemdef, lazy_load=True)
elif not isinstance(itemdef, pywikibot.page.WikibaseEntity):
raise TypeError('itemdef shall be str, WikibaseEntity or Page')
params = itemdef._defined_by(singular=True)
# TODO: support 'new'
baserevid = kwargs.pop(
'baserevid',
itemdef.latest_revision_id if 'id' in params else 0
)
params.update(
{'baserevid': baserevid,
'action': action,
'token': self.tokens['edit'],
'bot': kwargs.pop('bot', True),
})
params.update(prepare_data(action, action_data))
for arg in kwargs:
if arg in ['summary', 'tags']:
params[arg] = kwargs[arg]
else:
warn('Unknown parameter {} for action {}, ignored'
.format(arg, action), UserWarning, 2)
req = self.simple_request(**params)
data = req.submit()
return data
def wbsetaliases(self, itemdef, aliases, **kwargs):
"""
Set aliases for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetaliases', aliases, **kwargs)
def wbsetdescription(self, itemdef, description, **kwargs):
"""
Set description for a single Wikibase entity.
See self._wbset_action()
"""
return self._wbset_action(itemdef, 'wbsetdescription', description,
**kwargs)
def wbsetlabel(self, itemdef, label, **kwargs):
"""
Set label for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetlabel', label, **kwargs)
def wbsetsitelink(self, itemdef, sitelink, **kwargs):
"""
Set, remove or modify a sitelink on a Wikibase item.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetsitelink', sitelink, **kwargs)
@need_right('edit')
@need_extension('WikibaseLexeme')
def add_form(self, lexeme, form, *, bot: bool = True,
baserevid=None) -> dict:
"""
Add a form.
:param lexeme: Lexeme to modify
:type lexeme: pywikibot.LexemePage
:param form: Form to be added
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wbladdform',
'lexemeId': lexeme.getID(),
'data': json.dumps(form.toJSON()),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def remove_form(self, form, *, bot: bool = True, baserevid=None) -> dict:
"""
Remove a form.
:param form: Form to be removed
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wblremoveform',
'id': form.getID(),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def edit_form_elements(self, form, data, *, bot: bool = True,
baserevid=None) -> dict:
"""
Edit lexeme form elements.
:param form: Form
:type form: pywikibot.LexemeForm
:param data: data updates
:type data: dict
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
:return: New form data
"""
params = {
'action': 'wbleditformelements',
'formId': form.getID(),
'data': json.dumps(data),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data | en | 0.549509 | Objects representing API interface to Wikibase site. # # (C) Pywikibot team, 2012-2022 # # Distributed under the terms of the MIT license. # Wikibase data capable site. Initializer. Find namespaces for each known wikibase entity type. Return namespace for given entity type. :return: corresponding namespace :rtype: Namespace Return namespace for items. :return: item namespace :rtype: Namespace Return namespace for properties. :return: property namespace :rtype: Namespace Return a new instance for given entity id. :raises pywikibot.exceptions.NoWikibaseEntityError: there is no entity with the id :return: a WikibaseEntity subclass :rtype: WikibaseEntity Return the sparql endpoint url, if any has been set. :return: sparql endpoint url :rtype: str|None Return the base uri for concepts/entities. :return: concept base uri :rtype: str Return Site object for the geo-shapes repository e.g. commons. Return Site object for the tabular-datas repository e.g. commons. Fetch the current content of a Wikibase item. This is called loadcontent since wbgetentities does not support fetching old revisions. Eventually this will get replaced by an actual loadrevisions. :param identification: Parameters used to identify the page(s) :type identification: dict :param props: the optional properties to fetch. # TODO: When props is empty it results in # an empty string ('&props=') but it should # result in a missing entry. Yield subclasses of WikibaseEntity's with content prefilled. Note that pages will be iterated in a different order than in the underlying pagelist. :param pagelist: an iterable that yields either WikibaseEntity objects, or Page objects linked to an ItemPage. :param groupsize: how many pages to query at a time # No api call is made because item._content is given # cannot provide get_redirect=True (T145971) Obtain the type of a property. This is used specifically because we can cache the value for a much longer time (near infinite). # Store it for 100 years # the IDs returned from the API can be upper or lowercase, depending # on the version. See bug T55894 for more information. Edit entity. Note: This method is unable to create entities other than 'item' if dict with API parameters was passed to 'entity' parameter. :param entity: Page to edit, or dict with API parameters to use for entity identification :type entity: WikibaseEntity or dict :param data: data updates :type data: dict :param bot: Whether to mark the edit as a bot edit :return: New entity data :rtype: dict # this changes the reference to a new object # If no identification was provided Add a claim. :param entity: Entity to modify :type entity: WikibaseEntity :param claim: Claim to be added :type claim: pywikibot.Claim :param bot: Whether to mark the edit as a bot edit :param summary: Edit summary :type summary: str # Update the item Set the claim target to the value of the provided claim target. :param claim: The source of the claim target value :type claim: pywikibot.Claim :param snaktype: An optional snaktype ('value', 'novalue' or 'somevalue'). Default: 'value' :param bot: Whether to mark the edit as a bot edit :param summary: Edit summary :type summary: str # We need to already have the snak value Save the whole claim to the wikibase site. :param claim: The claim to save :type claim: pywikibot.Claim :param bot: Whether to mark the edit as a bot edit :param summary: Edit summary :type summary: str # We need to already have the snak value # since 7.0.0 Create/Edit a source. .. 
versionchanged:: 7.0 deprecated `baserevid` parameter was removed :param claim: A Claim object to add the source to :type claim: pywikibot.Claim :param source: A Claim object to be used as a source :type source: pywikibot.Claim :param new: Whether to create a new one if the "source" already exists :param bot: Whether to mark the edit as a bot edit :param summary: Edit summary # build up the snak # set the hash if the source should be changed. # if present, all claims of one source have the same hash # since 7.0.0 Create/Edit a qualifier. .. versionchanged:: 7.0 deprecated `baserevid` parameter was removed :param claim: A Claim object to add the qualifier to :type claim: pywikibot.Claim :param qualifier: A Claim object to be used as a qualifier :type qualifier: pywikibot.Claim :param new: Whether to create a new one if the "qualifier" already exists :param bot: Whether to mark the edit as a bot edit :param summary: Edit summary # build up the snak # since 7.0.0 Remove claims. .. versionchanged:: 7.0 deprecated `baserevid` parameter was removed :param claims: Claims to be removed :type claims: List[pywikibot.Claim] :param bot: Whether to mark the edit as a bot edit :type bot: bool :param summary: Edit summary :type summary: str # Check on_item for all additional claims # since 7.0.0 Remove sources. .. versionchanged:: 7.0 deprecated `baserevid` parameter was removed :param claim: A Claim object to remove the sources from :type claim: pywikibot.Claim :param sources: A list of Claim objects that are sources :type sources: list :param bot: Whether to mark the edit as a bot edit :param summary: Edit summary # since 7.0.0 Remove qualifiers. .. versionchanged:: 7.0 deprecated `baserevid` parameter was removed :param claim: A Claim object to remove the qualifier from :type claim: pywikibot.Claim :param qualifiers: Claim objects currently used as a qualifiers :type qualifiers: List[pywikibot.Claim] :param bot: Whether to mark the edit as a bot edit :param summary: Edit summary Link two pages together. :param page1: First page to link :type page1: pywikibot.Page :param page2: Second page to link :type page2: pywikibot.Page :param bot: Whether to mark the edit as a bot edit :return: dict API output :rtype: dict Merge two items together. :param from_item: Item to merge from :type from_item: pywikibot.ItemPage :param to_item: Item to merge into :type to_item: pywikibot.ItemPage :param ignore_conflicts: Which type of conflicts ('description', 'sitelink', and 'statement') should be ignored :type ignore_conflicts: list of str :param summary: Edit summary :type summary: str :param bot: Whether to mark the edit as a bot edit :return: dict API output :rtype: dict Merge two lexemes together. :param from_lexeme: Lexeme to merge from :type from_lexeme: pywikibot.LexemePage :param to_lexeme: Lexeme to merge into :type to_lexeme: pywikibot.LexemePage :param summary: Edit summary :type summary: str :keyword bot: Whether to mark the edit as a bot edit :return: dict API output Make a redirect to another item. :param to_item: title of target item. :type to_item: pywikibot.ItemPage :param from_item: Title of the item to be redirected. :type from_item: pywikibot.ItemPage :param bot: Whether to mark the edit as a bot edit Search for pages or properties that contain the given text. :param search: Text to find. :param language: Language to search in. :param total: Maximum number of pages to retrieve in total, or None in case of no limit. :return: 'search' list from API output. 
:rtype: Generator Execute wbset{action} on a Wikibase entity. Supported actions are: wbsetaliases, wbsetdescription, wbsetlabel and wbsetsitelink :param itemdef: Entity to modify or create :type itemdef: str, WikibaseEntity or Page connected to such item :param action: wbset{action} to perform: 'wbsetaliases', 'wbsetdescription', 'wbsetlabel', 'wbsetsitelink' :param action_data: data to be used in API request, see API help :type action_data: SiteLink or dict wbsetaliases: dict shall have the following structure: {'language': value (str), 'add': list of language codes (str), 'remove': list of language codes (str), 'set' list of language codes (str) } 'add' and 'remove' are alternative to 'set' wbsetdescription and wbsetlabel: dict shall have keys 'language', 'value' wbsetsitelink: dict shall have keys 'linksite', 'linktitle' and optionally 'badges' :keyword bot: Whether to mark the edit as a bot edit, default is True :type bot: bool :keyword tags: Change tags to apply with the edit :type tags: list of str :return: query result :raises AssertionError, TypeError Convert SiteLink to a dict accepted by wbsetsitelink API. Prepare data as expected by API. # Supported actions # prefer ID over (site, title) # TODO: support 'new' Set aliases for a single Wikibase entity. See self._wbset_action() for parameters Set description for a single Wikibase entity. See self._wbset_action() Set label for a single Wikibase entity. See self._wbset_action() for parameters Set, remove or modify a sitelink on a Wikibase item. See self._wbset_action() for parameters Add a form. :param lexeme: Lexeme to modify :type lexeme: pywikibot.LexemePage :param form: Form to be added :type form: pywikibot.LexemeForm :keyword bot: Whether to mark the edit as a bot edit :keyword baserevid: Base revision id override, used to detect conflicts. :type baserevid: long Remove a form. :param form: Form to be removed :type form: pywikibot.LexemeForm :keyword bot: Whether to mark the edit as a bot edit :keyword baserevid: Base revision id override, used to detect conflicts. :type baserevid: long Edit lexeme form elements. :param form: Form :type form: pywikibot.LexemeForm :param data: data updates :type data: dict :keyword bot: Whether to mark the edit as a bot edit :keyword baserevid: Base revision id override, used to detect conflicts. :type baserevid: long :return: New form data | 2.240608 | 2 |
app.py | MisaelVillaverde/fourier-calculator | 0 | 8391 | from flask import Flask
from flask import render_template, request
from flask import jsonify
import requests
import json
app = Flask(__name__)
@app.route("/symbo",methods=['POST'])
def symbo():
#import pdb; pdb.set_trace()
    # Reuse one HTTP session; Symbolab sets its public API bearer token as a cookie
    # on any solver page, so fetch a cheap page first and read the token back.
    session = requests.session()
    token = session.get("https://es.symbolab.com/solver/step-by-step/x%5E%7B2%7D?or=input").cookies.get_dict()["sy2.pub.token"]
    # Expression posted by the client as JSON, e.g. {"expression": "..."}
    query = request.json["expression"]
#response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query=%5Cint+tcos%5Cleft(nt%5Cright)dt+&referer=https%3A%2F%2Fes.symbolab.com%2Fsolver%2Fstep-by-step%2F%255Cint_%257B%2520%257Dtcos%255Cleft(nt%255Cright)dt%2520%3For%3Dinput&plotRequest=PlotOptional&page=step-by-step",headers={
response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query={query}",headers={
"x-requested-with":"XMLHttpRequest",
"authorization":f"Bearer {token}"
}).content)
return {
"dym":response["dym"],
"solutions":response["solutions"]
}
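# Hedged client-side sketch for exercising this endpoint against the local dev server
# (the URL and port assume Flask's defaults):
#
#     requests.post("http://127.0.0.1:5000/symbo",
#                   json={"expression": "x^{2}"}).json()["solutions"]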
@app.route('/')
def hello():
return render_template('index.html')
app.run(debug=True) | en | 0.354839 | 2.918962 | 3
my_code/Chapter_2.py | kalona/Spark-The-Definitive-Guide | 2 | 8392 | from pyspark.sql import SparkSession
# spark = SparkSession.builder.master("local[*]").getOrCreate()
spark = SparkSession.builder.getOrCreate()
file_path = "C:\home_work\local_github\Spark-The-Definitive-Guide\data\/flight-data\csv\/2015-summary.csv"
# COMMAND ----------
# COMMAND ----------
flightData2015 = spark\
.read\
.option("inferSchema", "true")\
.option("header", "true")\
.csv("./data/flight-data/csv/2015-summary.csv")
# COMMAND ----------
flightData2015.createOrReplaceTempView("flight_data_2015")
# COMMAND ----------
sqlWay = spark.sql("""
SELECT DEST_COUNTRY_NAME, count(1)
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
""")
dataFrameWay = flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.count()
sqlWay.explain()
dataFrameWay.explain()
# COMMAND ----------
from pyspark.sql.functions import max, col
#
flightData2015.select(max(col("count"))).show(1)
# COMMAND ----------
maxSql = spark.sql("""
SELECT DEST_COUNTRY_NAME, sum(count) as destination_total
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
ORDER BY sum(count) DESC
LIMIT 5
""")
maxSql.show()
# COMMAND ----------
from pyspark.sql.functions import desc
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.show()
# COMMAND ----------
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.explain()
# COMMAND ----------
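# Hedged note: the explain() calls above are meant to show that the SQL and DataFrame
# formulations compile to the same underlying plan. A rough sanity check on this small
# dataset would be comparing the (sorted) results of both queries:
#
#     sorted(sqlWay.collect()) == sorted(dataFrameWay.collect())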
| en | 0.294691 | 3.209193 | 3
tests/test_intake_postgres.py | ContinuumIO/intake-postgres | 2 | 8393 | import os
import pickle
import pytest
import pandas as pd
from shapely import wkt
from intake_postgres import PostgresSource
from intake import open_catalog
from .util import verify_datasource_interface
TEST_DATA_DIR = 'tests'
TEST_DATA = [
('sample1', 'sample1.csv'),
('sample2_1', 'sample2_1.csv'),
('sample2_2', 'sample2_2.csv'),
]
TEST_GIS_DATA = [
('points', 'sample_points.psql'),
('multipoints', 'sample_multipoints.psql'),
('lines', 'sample_lines.psql'),
('multilines', 'sample_multilines.psql'),
('polygons', 'sample_polygons.psql'),
('multipolygons', 'sample_multipolygons.psql'),
# ('triangles', 'sample_triangles.psql'),
]
TEST_TEMPLATE_DATA = [
'jinja2_params_with_env',
]
@pytest.fixture(scope='module')
def engine():
"""Start docker container for PostgreSQL database, yield a tuple (engine,
metadata), and cleanup connection afterward."""
from .util import start_postgres, stop_postgres
from sqlalchemy import create_engine
stop_postgres(let_fail=True)
local_port = start_postgres()
uri = 'postgresql://postgres@localhost:{}/postgres'.format(local_port)
engine = create_engine(uri)
for table_name, csv_fname in TEST_DATA:
csv_fpath = os.path.join(TEST_DATA_DIR, csv_fname)
df = pd.read_csv(csv_fpath)
df.to_sql(table_name, engine, index=False)
for table_name, psql_fname in TEST_GIS_DATA:
psql_fpath = os.path.join(TEST_DATA_DIR, psql_fname)
with engine.connect() as conn:
with open(psql_fpath, 'r') as fp:
cmds = fp.read().strip().split(';')
for cmd in cmds:
if cmd.strip():
conn.execute(' '.join(cmd.split()))
try:
yield engine
finally:
stop_postgres()
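# Hedged note: with the fixture above, a source can also be exercised directly,
# without going through a catalog:
#
#     src = PostgresSource(str(engine.url), 'select * from sample1')
#     df = src.read()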
@pytest.mark.parametrize('table_name,_', TEST_DATA)
def test_open(engine, table_name, _):
d = PostgresSource(str(engine.url), 'select * from '+table_name)
assert d.container == 'dataframe'
assert d.description is None
verify_datasource_interface(d)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_read(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover_after_read(engine, table_name, csv_fpath):
"""Assert that after reading the dataframe, discover() shows more accurate
information.
"""
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
df = source.read()
assert expected_df.equals(df)
info = source.discover()
assert info['dtype'] == dt
assert info['shape'] == (4, 3)
assert info['npartitions'] == 1
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_close(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
source.close()
# Can reopen after close
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_pickle(engine, table_name, csv_fpath):
source = PostgresSource(str(engine.url), 'select * from '+table_name)
pickled_source = pickle.dumps(source)
source_clone = pickle.loads(pickled_source)
expected_df = source.read()
df = source_clone.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,_1', TEST_DATA)
def test_catalog(engine, table_name, _1):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name.rsplit('_idx', 1)[0]
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
def test_catalog_join(engine):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = 'sample2'
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('table_name,_1', TEST_GIS_DATA)
def test_postgis_data(engine, table_name, _1):
from sqlalchemy import MetaData
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
meta = MetaData()
meta.reflect(bind=engine)
col_exprs = ['ST_AsText({0}) as {0}'.format(col.name)
for col in meta.tables[table_name].columns]
_query = pgsrc._sql_expr.replace('*', ', '.join(col_exprs))
expected_df = pd.read_sql_query(_query, engine).applymap(
lambda geom: str(wkt.loads(geom))
)
df = pgsrc.read().applymap(lambda geom: str(wkt.loads(geom)))
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('ds_name', TEST_TEMPLATE_DATA)
def test_jinja2(engine, ds_name):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
| import os
import pickle
import pytest
import pandas as pd
from shapely import wkt
from intake_postgres import PostgresSource
from intake import open_catalog
from .util import verify_datasource_interface
TEST_DATA_DIR = 'tests'
TEST_DATA = [
('sample1', 'sample1.csv'),
('sample2_1', 'sample2_1.csv'),
('sample2_2', 'sample2_2.csv'),
]
TEST_GIS_DATA = [
('points', 'sample_points.psql'),
('multipoints', 'sample_multipoints.psql'),
('lines', 'sample_lines.psql'),
('multilines', 'sample_multilines.psql'),
('polygons', 'sample_polygons.psql'),
('multipolygons', 'sample_multipolygons.psql'),
# ('triangles', 'sample_triangles.psql'),
]
TEST_TEMPLATE_DATA = [
'jinja2_params_with_env',
]
@pytest.fixture(scope='module')
def engine():
"""Start docker container for PostgreSQL database, yield a tuple (engine,
metadata), and cleanup connection afterward."""
from .util import start_postgres, stop_postgres
from sqlalchemy import create_engine
stop_postgres(let_fail=True)
local_port = start_postgres()
uri = 'postgresql://postgres@localhost:{}/postgres'.format(local_port)
engine = create_engine(uri)
for table_name, csv_fname in TEST_DATA:
csv_fpath = os.path.join(TEST_DATA_DIR, csv_fname)
df = pd.read_csv(csv_fpath)
df.to_sql(table_name, engine, index=False)
for table_name, psql_fname in TEST_GIS_DATA:
psql_fpath = os.path.join(TEST_DATA_DIR, psql_fname)
with engine.connect() as conn:
with open(psql_fpath, 'r') as fp:
cmds = fp.read().strip().split(';')
for cmd in cmds:
if cmd.strip():
conn.execute(' '.join(cmd.split()))
try:
yield engine
finally:
stop_postgres()
@pytest.mark.parametrize('table_name,_', TEST_DATA)
def test_open(engine, table_name, _):
d = PostgresSource(str(engine.url), 'select * from '+table_name)
assert d.container == 'dataframe'
assert d.description is None
verify_datasource_interface(d)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_read(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover_after_read(engine, table_name, csv_fpath):
"""Assert that after reading the dataframe, discover() shows more accurate
information.
"""
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
df = source.read()
assert expected_df.equals(df)
info = source.discover()
assert info['dtype'] == dt
assert info['shape'] == (4, 3)
assert info['npartitions'] == 1
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_close(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
source.close()
# Can reopen after close
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_pickle(engine, table_name, csv_fpath):
source = PostgresSource(str(engine.url), 'select * from '+table_name)
pickled_source = pickle.dumps(source)
source_clone = pickle.loads(pickled_source)
expected_df = source.read()
df = source_clone.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,_1', TEST_DATA)
def test_catalog(engine, table_name, _1):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name.rsplit('_idx', 1)[0]
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
def test_catalog_join(engine):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = 'sample2'
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('table_name,_1', TEST_GIS_DATA)
def test_postgis_data(engine, table_name, _1):
from sqlalchemy import MetaData
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
meta = MetaData()
meta.reflect(bind=engine)
col_exprs = ['ST_AsText({0}) as {0}'.format(col.name)
for col in meta.tables[table_name].columns]
_query = pgsrc._sql_expr.replace('*', ', '.join(col_exprs))
expected_df = pd.read_sql_query(_query, engine).applymap(
lambda geom: str(wkt.loads(geom))
)
df = pgsrc.read().applymap(lambda geom: str(wkt.loads(geom)))
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('ds_name', TEST_TEMPLATE_DATA)
def test_jinja2(engine, ds_name):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
| en | 0.687163 | # ('triangles', 'sample_triangles.psql'), Start docker container for PostgreSQL database, yield a tuple (engine, metadata), and cleanup connection afterward. Assert that after reading the dataframe, discover() shows more accurate information. # Can reopen after close | 2.065835 | 2 |
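The tests above all exercise the same source lifecycle; a minimal sketch of that pattern, assuming a reachable PostgreSQL instance and an existing table (the URI and table name below are placeholders, not values from the test suite):

from intake_postgres import PostgresSource

# Placeholder connection details; the tests build theirs from a docker container.
uri = 'postgresql://postgres@localhost:5432/postgres'
source = PostgresSource(uri, 'select * from sample1')

info = source.discover()   # lazy metadata: dtypes, (None, ncols) shape, npartitions == 1
df = source.read()         # materializes the query into a pandas DataFrame
info = source.discover()   # after read(), shape reports the actual row count

source.close()             # per test_close, a closed source can simply be read again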
Module_3/testImage.py | dks1018/CoffeeShopCoding | 0 | 8394 | # file = open('C:\\Users\\dks10\\OneDrive\\Desktop\\Projects\\Code\\Python\\PythonCrypto\\Module_3\\eye.png', 'rb')
file = open('encrypt_eye.png', 'rb')
image = file.read()
file.close()
image = bytearray(image)
key = 48
for index, value in enumerate(image):
image[index] = value^key
file = open('2eye.png','wb')
file.write(image)
file.close() | # file = open('C:\\Users\\dks10\\OneDrive\\Desktop\\Projects\\Code\\Python\\PythonCrypto\\Module_3\\eye.png', 'rb')
file = open('encrypt_eye.png', 'rb')
image = file.read()
file.close()
image = bytearray(image)
key = 48
for index, value in enumerate(image):
image[index] = value^key
file = open('2eye.png','wb')
file.write(image)
file.close() | en | 0.293972 | # file = open('C:\\Users\\dks10\\OneDrive\\Desktop\\Projects\\Code\\Python\\PythonCrypto\\Module_3\\eye.png', 'rb') | 2.911926 | 3 |
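The script above depends on XOR being its own inverse: applying the same single-byte key twice restores the original bytes. A small self-contained sketch of that round trip (the byte string is made up for illustration):

# XOR with one key is symmetric, so the same loop both encrypts and decrypts.
key = 48
original = bytearray(b'example image bytes')

encrypted = bytearray(b ^ key for b in original)   # the transform applied in the script
decrypted = bytearray(b ^ key for b in encrypted)  # applying it again undoes it

assert decrypted == original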
ledfxcontroller/effects/temporal.py | Aircoookie/LedFx | 17 | 8395 | <reponame>Aircoookie/LedFx
import time
import logging
from ledfxcontroller.effects import Effect
from threading import Thread
import voluptuous as vol
_LOGGER = logging.getLogger(__name__)
DEFAULT_RATE = 1.0 / 60.0
@Effect.no_registration
class TemporalEffect(Effect):
_thread_active = False
_thread = None
CONFIG_SCHEMA = vol.Schema({
vol.Required('speed', default = 1.0): float
})
def thread_function(self):
while self._thread_active:
startTime = time.time()
# Treat the return value of the effect loop as a speed modifier
            # such that effects that are naturally faster or slower can have
# a consistent feel.
sleepInterval = self.effect_loop()
if sleepInterval is None:
sleepInterval = 1.0
sleepInterval = sleepInterval * DEFAULT_RATE
# Calculate the time to sleep accounting for potential heavy
# frame assembly operations
timeToSleep = (sleepInterval / self._config['speed']) - (time.time() - startTime)
if timeToSleep > 0:
time.sleep(timeToSleep)
def effect_loop(self):
"""
Triggered periodically based on the effect speed and
any additional effect modifiers
"""
pass
def activate(self, pixel_count):
super().activate(pixel_count)
self._thread_active = True
self._thread = Thread(target = self.thread_function)
self._thread.start()
def deactivate(self):
if self._thread_active:
self._thread_active = False
self._thread.join()
self._thread = None
super().deactivate()
| import time
import logging
from ledfxcontroller.effects import Effect
from threading import Thread
import voluptuous as vol
_LOGGER = logging.getLogger(__name__)
DEFAULT_RATE = 1.0 / 60.0
@Effect.no_registration
class TemporalEffect(Effect):
_thread_active = False
_thread = None
CONFIG_SCHEMA = vol.Schema({
vol.Required('speed', default = 1.0): float
})
def thread_function(self):
while self._thread_active:
startTime = time.time()
# Treat the return value of the effect loop as a speed modifier
# such that effects that are nartually faster or slower can have
# a consistent feel.
sleepInterval = self.effect_loop()
if sleepInterval is None:
sleepInterval = 1.0
sleepInterval = sleepInterval * DEFAULT_RATE
# Calculate the time to sleep accounting for potential heavy
# frame assembly operations
timeToSleep = (sleepInterval / self._config['speed']) - (time.time() - startTime)
if timeToSleep > 0:
time.sleep(timeToSleep)
def effect_loop(self):
"""
Triggered periodically based on the effect speed and
any additional effect modifiers
"""
pass
def activate(self, pixel_count):
super().activate(pixel_count)
self._thread_active = True
self._thread = Thread(target = self.thread_function)
self._thread.start()
def deactivate(self):
if self._thread_active:
self._thread_active = False
self._thread.join()
self._thread = None
        super().deactivate() | en | 0.91278 | # Treat the return value of the effect loop as a speed modifier # such that effects that are naturally faster or slower can have # a consistent feel. # Calculate the time to sleep accounting for potential heavy # frame assembly operations Triggered periodically based on the effect speed and any additional effect modifiers | 2.667645 | 3 |
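The thread loop above is a frame-pacing pattern: each frame's assembly time is subtracted from the target interval so the effective rate stays steady no matter how long a frame takes. A standalone sketch of the same idea, independent of the Effect base class (draw_frame and the values below are illustrative):

import time

TARGET_RATE = 1.0 / 60.0  # aim for roughly 60 iterations per second, as in the effect

def paced_loop(draw_frame, speed=1.0, frames=10):
    for _ in range(frames):
        start = time.time()
        modifier = draw_frame()
        if modifier is None:          # a frame may return a slowdown factor; default to 1.0
            modifier = 1.0
        interval = modifier * TARGET_RATE
        to_sleep = (interval / speed) - (time.time() - start)
        if to_sleep > 0:              # never sleep if the frame already overran its budget
            time.sleep(to_sleep)

paced_loop(lambda: None)  # a do-nothing frame; a real effect would assemble pixel data here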
07/c/3 - Square Census.py | Surferlul/csc-python-solutions | 0 | 8396 | <gh_stars>0
n=int(input())
c = 1
while c**2 < n:
print(c**2)
c += 1
| n=int(input())
c = 1
while c**2 < n:
print(c**2)
c += 1 | none | 1 | 3.463962 | 3 |
|
utils.py | LuChang-CS/sherbet | 2 | 8397 | import numpy as np
class DataGenerator:
def __init__(self, inputs, shuffle=True, batch_size=32):
assert len(inputs) > 0
self.inputs = inputs
self.idx = np.arange(len(inputs[0]))
self.shuffle = shuffle
self.batch_size = batch_size
self.on_epoch_end()
def data_length(self):
return len(self.idx)
def __len__(self):
n = self.data_length()
len_ = n // self.batch_size
return len_ if n % self.batch_size == 0 else len_ + 1
def __getitem__(self, index):
start = index * self.batch_size
end = start + self.batch_size
index = self.idx[start:end]
data = []
for x in self.inputs:
data.append(x[index])
return data
def on_epoch_end(self):
if self.shuffle:
np.random.shuffle(self.idx)
def set_batch_size(self, batch_size):
self.batch_size = batch_size
def lr_decay(total_epoch, init_lr, split_val):
lr_map = [init_lr] * total_epoch
if len(split_val) > 0:
assert split_val[0][0] > 1
assert split_val[-1][0] <= total_epoch
current_split_index = 0
current_lr = init_lr
next_epoch, next_lr = split_val[current_split_index]
for i in range(total_epoch):
if i < next_epoch - 1:
lr_map[i] = current_lr
else:
current_lr = next_lr
lr_map[i] = current_lr
current_split_index += 1
if current_split_index >= len(split_val):
next_epoch = total_epoch + 1
else:
next_epoch, next_lr = split_val[current_split_index]
def lr_schedule_fn(epoch, lr):
return lr_map[epoch]
return lr_schedule_fn
| import numpy as np
class DataGenerator:
def __init__(self, inputs, shuffle=True, batch_size=32):
assert len(inputs) > 0
self.inputs = inputs
self.idx = np.arange(len(inputs[0]))
self.shuffle = shuffle
self.batch_size = batch_size
self.on_epoch_end()
def data_length(self):
return len(self.idx)
def __len__(self):
n = self.data_length()
len_ = n // self.batch_size
return len_ if n % self.batch_size == 0 else len_ + 1
def __getitem__(self, index):
start = index * self.batch_size
end = start + self.batch_size
index = self.idx[start:end]
data = []
for x in self.inputs:
data.append(x[index])
return data
def on_epoch_end(self):
if self.shuffle:
np.random.shuffle(self.idx)
def set_batch_size(self, batch_size):
self.batch_size = batch_size
def lr_decay(total_epoch, init_lr, split_val):
lr_map = [init_lr] * total_epoch
if len(split_val) > 0:
assert split_val[0][0] > 1
assert split_val[-1][0] <= total_epoch
current_split_index = 0
current_lr = init_lr
next_epoch, next_lr = split_val[current_split_index]
for i in range(total_epoch):
if i < next_epoch - 1:
lr_map[i] = current_lr
else:
current_lr = next_lr
lr_map[i] = current_lr
current_split_index += 1
if current_split_index >= len(split_val):
next_epoch = total_epoch + 1
else:
next_epoch, next_lr = split_val[current_split_index]
def lr_schedule_fn(epoch, lr):
return lr_map[epoch]
return lr_schedule_fn
| none | 1 | 2.748842 | 3 |
|
Version1_STI.py | sudhanshu55/Speech_to_Image | 0 | 8398 | from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import speech_recognition as sr
import nltk
from google_images_download import google_images_download
response = google_images_download.googleimagesdownload()
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
data = r.recognize_google(audio)  # recognize_google returns a str in Python 3; encoding to bytes would break word_tokenize below
print (data)
stopWords = set(stopwords.words('english'))
words = word_tokenize(data)
wordsFiltered = []
for w in words:
if w not in stopWords:
wordsFiltered.append(w)
into_string = ",".join(wordsFiltered)  # comma-separated keywords rather than the list's repr
print(into_string)
arguments = {"keywords":into_string,"limit":2,"print_urls":True} #creating list of arguments
response.download(arguments) #passing the arguments to the function | from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import speech_recognition as sr
import nltk
from google_images_download import google_images_download
response = google_images_download.googleimagesdownload()
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
data = r.recognize_google(audio)  # recognize_google returns a str in Python 3; encoding to bytes would break word_tokenize below
print (data)
stopWords = set(stopwords.words('english'))
words = word_tokenize(data)
wordsFiltered = []
for w in words:
if w not in stopWords:
wordsFiltered.append(w)
into_string = ",".join(wordsFiltered)  # comma-separated keywords rather than the list's repr
print(into_string)
arguments = {"keywords":into_string,"limit":2,"print_urls":True} #creating list of arguments
response.download(arguments) #passing the arguments to the function | en | 0.177993 | #creating list of arguments #passing the arguments to the function | 3.290518 | 3 |
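The pipeline above needs a microphone and network access end to end; the keyword-building step in the middle can be seen in isolation on a hardcoded transcript (this assumes the NLTK stopwords and punkt data have already been downloaded):

from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords

transcript = "show me pictures of the golden gate bridge"  # stand-in for recognize_google output
stop_words = set(stopwords.words('english'))

keywords = [w for w in word_tokenize(transcript) if w not in stop_words]
print(",".join(keywords))  # e.g. "show,pictures,golden,gate,bridge", the keywords string passed on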
src/models.py | jonathanlloyd/scratchstack-httpserver | 0 | 8399 | <gh_stars>0
from dataclasses import dataclass
@dataclass
class Request:
method: str
path: str
headers: dict
body: bytes
@dataclass
class Response:
status_code: int
reason_phrase: str
headers: dict
body: bytes
| from dataclasses import dataclass
@dataclass
class Request:
method: str
path: str
headers: dict
body: bytes
@dataclass
class Response:
status_code: int
reason_phrase: str
headers: dict
body: bytes | none | 1 | 2.44192 | 2 |
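The two dataclasses above are plain value objects; a toy handler sketch showing how a server might map one onto the other (the handler and the module import are illustrative, not part of the repository):

from models import Request, Response  # module name assumed from src/models.py above

def handle(request: Request) -> Response:
    if request.method == 'GET' and request.path == '/ping':
        body = b'pong'
        return Response(200, 'OK', {'Content-Length': str(len(body))}, body)
    return Response(404, 'Not Found', {}, b'')

resp = handle(Request('GET', '/ping', {}, b''))
assert resp.status_code == 200 and resp.body == b'pong'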