# Repository: takkaneko/netengtools, file: boreasprov.py
#!/usr/bin/env python3
# boreasprov.py
import hafwl2l3prov
import fwl2l3prov
import halbl2l3prov
import lbl2l3prov
import ipsprov
import hafwiloprov
import fwiloprov
def main():
title ='\n****************************************\n'
title += 'BOREAS NETWORK DEVICE PROVISIONING SUITE\n'
title += '****************************************\n\n'
title += 'Please select your provisioning task from the following menus:\n'
print(title)
options = '1. HA firewall (CheckPoint or ASA5500 series)\n'
options += '2. Stand-alone firewall (CheckPoint or ASA5500 series)\n'
options += '3. HA Alteon4408\n'
options += '4. Stand-alone Alteon4408\n'
options += '5. IPS only (To monitor multiple devices or an HA *standby*)\n'
options += '6. HA firewall SecNet iLO segment (Not common)\n'
options += '7. Stand-alone firewall SecNet iLO segment (Not common)\n'
print(options)
while True:
try:
choice = int(input('Type your selection then hit Enter: '))
if 1 <= choice <=7:
break
else:
print('ERROR: DATA OUT OF RANGE\n')
except ValueError:
print('ERROR: INVALID DATA PROVIDED\n')
if choice == 1:
print('Starting HA firewall provisioning...\n')
hafwl2l3prov.main()
if choice == 2:
print('Starting stand-alone firewall provisioning...\n')
fwl2l3prov.main()
if choice == 3:
print('Starting HA Alteon4408 provisioning...\n')
halbl2l3prov.main()
if choice == 4:
print('Starting stand-alone Alteon4408 provisioning...\n')
lbl2l3prov.main()
if choice == 5:
print('Starting IPS provisioning...\n')
ipsprov.main()
if choice == 6:
print('Starting HA firewall SecNet iLO provisioning...\n')
hafwiloprov.main()
if choice == 7:
print('Starting stand-alone firewall SecNet iLO provisioning...\n')
fwiloprov.main()
if __name__ == '__main__':
main()
# License: MIT

# Repository: cdemulde/wwdata, file: wwdata/Class_HydroData.py
# -*- coding: utf-8 -*-
"""
Class_HydroData provides functionalities for handling data obtained in the context of (waste)water treatment.
Copyright (C) 2016 Chaim De Mulder
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
"""
#import sys
import os
#from os import listdir
import pandas as pd
import scipy as sp
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt #plotten in python
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import warnings as wn
import wwdata.data_reading_functions
class HydroData():
"""
Attributes
----------
timedata_column : str
name of the column containing the time data
data_type : str
type of data provided
experiment_tag : str
A tag identifying the experiment; can be a date or a code used by
the producer/owner of the data.
time_unit : str
The time unit in which the time data is given
units : dict
The units of the variables in the columns
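Examples
--------
A minimal construction sketch; the DataFrame ``df`` and its column name
are illustrative assumptions, not part of this class:

>>> import pandas as pd
>>> df = pd.DataFrame({'flow': [1.2, 1.5, 1.1]},
...                   index=pd.date_range('2016-01-01', periods=3, freq='H'))
>>> hd = HydroData(df, timedata_column='index', data_type='WWTP',
...                experiment_tag='example', time_unit='d')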
"""
def __init__(self,data,timedata_column='index',data_type='WWTP',
experiment_tag='No tag given',time_unit=None,
units={}):
"""
initialisation of a HydroData object.
"""
if isinstance(data, pd.DataFrame):
self.data = data.copy()
else:
try:
self.data = pd.DataFrame(data.copy())
except:
raise Exception("Input data not convertable to DataFrame.")
if timedata_column == 'index':
self.timename = 'index'
self.time = self.data.index
else:
self.timename = timedata_column
self.time = self.data[timedata_column].values.ravel()
self.columns = np.array(self.data.columns)
self.data_type = data_type
self.tag = experiment_tag
self.time_unit = time_unit
self.meta_valid = pd.DataFrame(index=self.data.index)
self.units = units
#self.highs = pd.DataFrame(data=0,columns=['highs'],index=self.data.index)
#wn.warn('WARNING: Some functions in the OnlineSensorBased Class assume ' + \
#'equidistant data!!! This is primarily of importance when indexes are ' + \
#'missing!')
def set_tag(self,tag):
"""
Sets the tag element of the HydroData object to the given tag
Returns
-------
None
"""
self.tag = tag
def set_units(self,units):
"""
Set the units element of the HydroData object to a given dataframe
"""
if isinstance(units, pd.DataFrame):
self.units = units.copy()
else:
try:
self.units = pd.DataFrame(units.copy())
except:
raise Exception("Unit data not convertable to DataFrame type.")
def set_time_unit(self,unit):
"""
Sets the time_unit element of the HydroData object to a given unit
Returns
-------
None
"""
self.time_unit = unit
def head(self, n=5):
"""piping pandas head function, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html for documentation"""
return self.data.head(n)
def tail(self, n=5):
"""piping pandas tail function, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.tail.html for documentation"""
return self.data.tail(n)
def index(self):
"""piping pandas index function, see http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.Index.html for documentation"""
return self.data.index
#####################
### FORMATTING
#####################
def fill_index(self,arange,index_type='float'):
"""
function to fill in missing index values
"""
wn.warn('This function assumes equidistant data and fills the indexes '+\
'accordingly')
first_part = self.data[self.data.index < arange[0]]
if isinstance(self.data.index[0],dt.datetime):
delta_time = self.data.index[1]-self.data.index[0]
index = [arange[0] + delta_time * x for x in range(0, int((arange[1]-arange[0])/delta_time))]
elif isinstance(self.data.index[0],float):
day_length = float(len(self.data[0:1]))
index = np.arange(arange[0],arange[1],(arange[1]-arange[0])/day_length)
fill_part = pd.DataFrame(index=index,columns=self.data.columns)
last_part = self.data[self.data.index > arange[1]]
self.data = first_part.append(fill_part).append(last_part)
self._update_time()
def _reset_meta_valid(self,data_name=None):
"""
reset the meta dataframe, possibly for only a certain data series,
should wrong labels have been assigned at some point
"""
if data_name == None:
self.meta_valid = pd.DataFrame(index=self.data.index)
else:
try:
self.meta_valid[data_name] = pd.Series(['original']*len(self.meta_valid),index=self.index())
#self.meta_valid.drop(data_name,axis=1)
except:
pass
#wn.warn(data_name + ' is not contained in self.meta_valid yet, so cannot\
#be removed from it!')
def drop_index_duplicates(self):
"""
drop rows with a duplicate index. Also updates the meta_valid dataframe
Note
----
It is assumed that the dropped rows contain the same data as their index-
based duplicate, i.e. that no data is lost by using the function.
"""
#len_orig = len(self.data)
self.data = self.data.groupby(self.index()).first()
self.meta_valid = self.meta_valid.groupby(self.meta_valid.index).first()
self._update_time()
if isinstance(self.index()[1],str):
wn.warn('Rows may change order using this function based on '+ \
'string values. Convert to datetime, int or float and use '+ \
'.sort_index() or .sort_values() to avoid. (see also ww.to_datetime())')
def replace(self,to_replace,value,inplace=False):
"""piping pandas replace function, see http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.replace.html for documentation"""
if inplace == False:
return self.__class__(self.data.replace(to_replace,value,inplace=False),
self.timename,self.data_type,
self.tag,self.time_unit)
elif inplace == True:
return self.data.replace(to_replace,value,inplace=inplace)
def set_index(self,keys,key_is_time=False,drop=True,inplace=False,
verify_integrity=False,save_prev_index=True):
"""
piping and extending pandas set_index function, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.set_index.html for documentation
Notes
----------
key_is_time : bool
when true, the new index will be known as the time data from here on
(other arguments cfr pd.set_index)
Returns
-------
HydroData object (if inplace=False)
None (if inplace=True)
"""
if save_prev_index:
self.prev_index = self.data.index
if not inplace:
if key_is_time:
if isinstance(self.time[0],str):
raise ValueError('Time values of type "str" can not be used as index')
timedata_column = 'index'
elif key_is_time == False:
timedata_column = self.timename
data = self.data.set_index(keys,drop=drop,inplace=False,
verify_integrity=verify_integrity)
return self.__class__(pd.DataFrame(data),timedata_column=timedata_column,
data_type=self.data_type,experiment_tag=self.tag,
time_unit=self.time_unit)
elif inplace:
if key_is_time:
if self.timename == 'index':
raise IndexError('There already is a timeseries in the dataframe index!')
if isinstance(self.time[0],str):
raise ValueError('Time values of type "str" can not be used as index')
self.data.set_index(keys,drop=drop,inplace=True,
verify_integrity=verify_integrity)
self.columns = np.array(self.data.columns)
self._update_meta_valid_index()
if key_is_time:
self.timename = 'index'
self.time = self.data.index
def _update_time(self):
"""
adjust the value of self.time, needed in some functions
"""
if self.timename == 'index':
self.time = self.index()
else:
self.time = self.data[self.timename]
def _update_meta_valid_index(self):
"""
update the index of the meta_valid dataframe to be the same as the one of the dataframe
with the data
"""
self.meta_valid.index = self.index()
def to_float(self,columns='all'):
"""
convert values in given columns to float values
Parameters
---------
columns : array of strings
column names of the columns where values need to be converted to floats
"""
if columns == 'all':
columns = self.columns#.levels[0]
for column in columns:
try:
self.data[column] = self.data[column].astype(float)
except TypeError:
print('Data type of column '+ str(column) + ' not convertible to float')
self._update_time()
def to_datetime(self,time_column='index',time_format='%dd-%mm-%yy',
unit='D'):
"""
Piping and modifying pandas to_datetime function
Parameters
---------
time_column : str
column name of the column where values need to be converted to
datetime values. Default 'index' converts index values to datetime
time_format : str
the format to use by to_datetime function to convert strings to
datetime format
unit : str
unit to use by to_datetime function to convert int or float values
to datetime format
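Examples
--------
A sketch, assuming a HydroData object ``hd`` whose index holds date
strings such as '01-01-16' (the format string is an assumption):

>>> hd.to_datetime(time_column='index', time_format='%d-%m-%y')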
"""
if time_column == 'index':
if isinstance(self.time[0],int) or isinstance(self.time[0],float):
self.data.index = pd.to_datetime(self.time,unit=unit)
self.data.sort_index(inplace=True)
elif isinstance(self.time[0],str):
self.data.index = pd.to_datetime(self.time,format=time_format)
self.data.sort_index(inplace=True)
else:
if isinstance(self.time[0],int) or isinstance(self.time[0],float):
self.data.index = pd.to_datetime(self.data[time_column],unit=unit)
self.data.sort_values(inplace=True)
elif isinstance(self.time[0],str):
self.data[time_column] = pd.to_datetime(self.data[time_column].values.ravel(),
format=time_format)
self.data.sort_values(time_column,inplace=True)
self._update_time()
def absolute_to_relative(self,time_data='index',unit='d',inplace=True,
save_abs=True,decimals=5):
"""
converts a pandas series with datetime timevalues to relative timevalues
in the given unit, starting from 0
Parameters
----------
time_data : str
name of the column containing the time data. If this is the index
column, just give 'index' (also default)
unit : str
unit to which to convert the time values (sec, min, hr or d)
Returns
-------
None if inplace is True
HydroData object if inplace is False
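Examples
--------
A sketch, assuming a HydroData object ``hd`` with a datetime index;
converts the index to days relative to the first timestamp:

>>> hd.absolute_to_relative(time_data='index', unit='d', inplace=True)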
"""
if time_data == 'index':
timedata = self.time
else:
timedata = self.data[time_data]
time_delta = timedata - timedata[0]
relative = time_delta.map(total_seconds)
if unit == 'sec':
relative = np.array(relative)
elif unit == 'min':
relative = np.array(relative) / (60)
elif unit == 'hr':
relative = np.array(relative) / (60*60)
elif unit == 'd':
relative = np.array(relative) / (60*60*24)
self.time_unit = unit
if inplace == False:
data = self.data.copy()
data['time_rel'] = relative.round(decimals)
return self.__class__(data,self.timename)
elif inplace == True:
if save_abs == True:
self.data['time_abs'] = timedata
self.columns = np.array(self.data.columns)
if time_data == 'index':
self.data.index = relative.round(decimals)
self._update_time()
self.columns = np.array(self.data.columns)
return None
else:
self.data[time_data] = relative.round(decimals)
return None
def write(self,filename,filepath=os.getcwd(),method='all'):
"""
Parameters
----------
filepath : str
the path the output file should be saved to
filename : str
the name of the output file
method : str (all,filtered,filled)
depending on the method choice, different values will be written out:
all values, only the filtered values or the filled values
Returns
-------
None; write an output file
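Examples
--------
A sketch, assuming a HydroData object ``hd``; the file name is an
illustrative assumption:

>>> hd.write('hydrodata_out.txt', method='all')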
"""
if method == 'all':
self.data.to_csv(os.path.join(filepath,filename),sep='\t')
elif method == 'filtered':
to_write = self.data.copy()
for column in self.meta_valid.columns:
to_write[column] = self.data[column][self.meta_valid[column]=='original']
to_write.to_csv(os.path.join(filepath,filename),sep='\t')
elif method == 'filled':
self.filled.to_csv(os.path.join(filepath,filename),sep='\t')
#######################
### DATA EXPLORATION
#######################
def get_avg(self,name=None,only_checked=True):
"""
Gets the averages of all or certain columns in a dataframe
Parameters
----------
name : array of str
name(s) of the column(s) containing the data to be averaged;
defaults to None, in which case the average is calculated for every column
Returns
-------
pd.Series, float or list :
the average(s) of all or certain columns
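Examples
--------
A sketch, assuming a HydroData object ``hd``; the column name is an
illustrative assumption:

>>> hd.get_avg('flow', only_checked=False)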
"""
mean = []
if only_checked:
df = self.data.copy()
df[self.meta_valid == 'filtered']=np.nan
if name == None:
mean = df.mean()
elif isinstance(name,str):
mean = df[name].mean()
else:
for i in name:
mean.append(df[i].mean())
else:
if name == None:
mean = self.data.mean()
elif isinstance(name,str):
mean = self.data[name].mean()
else:
for i in name:
mean.append(self.data[i].mean())
return mean
def get_std(self,name=None,only_checked=True):
"""
Gets the standard deviations of all or certain columns in a dataframe
Parameters
----------
name : array of str
name(s) of the column(s) containing the data to calculate the standard
deviation for; defaults to None, in which case the standard deviation
is calculated for every column
Returns
-------
pd.Series, float or list :
the standard deviation(s) of all or certain columns
"""
std=[]
if only_checked:
df = self.data.copy()
df[self.meta_valid == 'filtered']=np.nan
if name == None:
std = df.std()
elif isinstance(name,str):
std = df[name].std()
else:
for i in name:
std.append(df[i].std())
else:
if name == None:
std = self.data.std()
elif isinstance(name,str):
std = self.data[name].std()
else:
for i in name:
std.append(self.data[i].std())
return std
def get_highs(self,data_name,bound_value,arange,method='percentile',plot=False):
"""
creates a dataframe with tags indicating what indices have data-values
higher than a certain value; example: the definition/tagging of rain
events.
Parameters
----------
data_name : str
name of the column to execute the function on
bound_value : float
the boundary value above which points will be tagged
arange : array of two values
the range within which high values need to be tagged
method: str (value or percentile)
when percentile, the bound value is a given percentile above which
data points will be tagged, when value, bound_values is used directly
to tag data points.
Returns
-------
None
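Examples
--------
A sketch tagging high flows, assuming a HydroData object ``hd``; the
column name and percentile are illustrative assumptions:

>>> hd.get_highs('flow', 0.95, [hd.index()[0], hd.index()[-1]])
>>> hd.highs['highs'].sum()  # number of tagged points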
"""
self._reset_highs()
try:
data_to_use = self.data[data_name][arange[0]:arange[1]].copy()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
# get indexes where flow is higher then bound_value
if method == 'value':
bound_value = bound_value
elif method == 'percentile':
bound_value = data_to_use.dropna().quantile(bound_value)
indexes = data_to_use.loc[data_to_use > bound_value].index
self.highs['highs'].loc[indexes] = 1
if plot:
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
ax.plot(data_to_use[self.highs['highs']==0].index,
data_to_use[self.highs['highs']==0],
'-g')
ax.plot(data_to_use[self.highs['highs']==1].index,
data_to_use[self.highs['highs']==1],
'.b',label='high')
ax.legend(fontsize=17)
ax.tick_params(labelsize=15)
ax.set_ylabel(data_name,size=17)
ax.set_xlabel('Time',size=17)
def _reset_highs(self):
"""
"""
self.highs = pd.DataFrame(data=0,columns=['highs'],index=self.index())
##############
### FILTERING
##############
def add_to_meta_valid(self,column_names):
"""
Adds (a) column(s) with the given column_name(s) to the self.meta_valid
DataFrame, where all tags are set to 'original'. This makes sure that
also data that already is very reliable can be used further down the
process (e.g. filling etc.)
Parameters
----------
column_names : array
array containing the names of the columns to add to the meta_valid
dataframe
"""
self._plot = 'valid'
# Create/adjust self.filled
self.meta_valid = self.meta_valid.reindex(self.index())
for column in column_names:
if not column in self.meta_valid.columns:
self.meta_valid[column] = 'original'
else:
pass
wn.warn('self.meta_valid already contains a column named ' +
column + '. The original column was kept.')
def tag_nan(self,data_name,arange=None,clear=False):
"""
adds a tag 'filtered' in self.meta_valid for every NaN value in the given
column
Parameters
----------
data_name : str
column name of the column to apply the function to
arange : array of two values
the range within which nan values need to be tagged
clear : bool
when true, resets the tags in meta_valid for the data in column
data_name
Returns
-------
None
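Examples
--------
A sketch, assuming a HydroData object ``hd``; the column name is an
illustrative assumption:

>>> hd.tag_nan('flow')
>>> hd.meta_valid['flow'].value_counts()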
"""
self._plot='valid'
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
if not data_name in self.meta_valid.columns:
# if the data_name column doesn't exist yet in the meta_valid dataset,
# add it
self.add_to_meta_valid([data_name])
if arange == None:
len_orig = len(self.data[data_name])
self.meta_valid[data_name] = np.where(pd.isnull(self.data[data_name]),
'filtered','original')
len_new = self.data[data_name].count()
else:
# check if arange has the right type
try:
len_orig = len(self.data[data_name][arange[0]:arange[1]])
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange "+\
"argument type " + str(type(arange[0])) + " or " +\
str(type(arange[1])) + ". Try changing the type "+\
"of the arange values to one compatible with " + \
str(type(self.data.index[0])) + " slicing.")
self.meta_valid[data_name][arange[0]:arange[1]] = np.where(pd.isnull(self.data[data_name][arange[0]:arange[1]]),
'filtered','original')
len_new = self.data[data_name][arange[0]:arange[1]].count()
_print_removed_output(len_orig,len_new,'NaN tagging')
def tag_doubles(self,data_name,bound,arange=None,clear=False,inplace=False,log_file=None,
plot=False,final=False):
'''
tags double values that subsequently occur in a measurement series.
This is relevant in case a sensor has failed and produces a constant
signal. A band is provided within which the signal can vary and still
be filtered out
Parameters
----------
data_name : str
column name of the column from which double values will be sought
bound : float
boundary value of the band to use. When the difference between a
point and the next one is smaller then the bound value, the latter
datapoint is tagged as 'filtered'.
arange : array of two values
the range within which double values need to be tagged
clear : bool
if True, the tags added to datapoints before will be removed and put
back to 'original'.
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned). (This argument only comes into play when the 'final'
argument is True)
log_file : str
string containing the directory to a log file to be written out
when using this function
plot : bool
whether or not to make a plot of the newly tagged data points
final : bool
if true, the values are actually replaced with nan values (either
inplace or in a new wwdata object)
Returns
-------
HydroData object (if inplace=False)
the dataframe from which the double values of 'data' are removed or
replaced
None (if inplace=True)
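Examples
--------
A sketch tagging near-constant sensor readings, assuming a HydroData
object ``hd``; the column name and bound are illustrative assumptions:

>>> hd.tag_doubles('flow', bound=0.05)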
'''
self._plot = 'valid'
len_orig = self.data[data_name].count()
# Make temporary object for operations
df_temp = self.__class__(self.data.copy(),timedata_column=self.timename,
data_type=self.data_type,experiment_tag=self.tag,
time_unit=self.time_unit)
# Make a mask with False values for double values to be dropped
bound_mask = abs(self.data[data_name].dropna().diff()) >= bound
# Make sure the indexes are still the same in the mask and df_temp, so the
# tagging can happen
bound_mask = bound_mask.reindex(df_temp.index()).fillna(True)
# Make a mask with False values where data needs to be filtered
if arange == None:
mask = bound_mask
else:
try:
range_mask = (self.index() < arange[0]) | (arange[1] < self.index())
mask = bound_mask | range_mask
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange "+\
"argument type " + str(type(arange[0])) + " or " +\
str(type(arange[1])) + ". Try changing the type "+\
"of the arange values to one compatible with " + \
str(type(self.data.index[0])) + " slicing.")
# Update the index of self.meta_valid
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
if not data_name in self.meta_valid.columns:
# if the data_name column doesn't exist yet in the meta_valid dataset,
# add it
self.add_to_meta_valid([data_name])
# Do the actual filtering, based on the mask
df_temp.data[data_name] = df_temp.data[data_name].drop(df_temp.data[mask==False].index)
len_new = df_temp.data[data_name].count()
if log_file == None:
_print_removed_output(len_orig,len_new,'double value tagging')
elif type(log_file) == str:
_log_removed_output(log_file,len_orig,len_new,'filtered')
else:
raise TypeError('Provide the location of the log file \
as a string type, or drop the argument if \
no log file is needed.')
self.meta_valid[data_name][mask==False] = 'filtered'
# Create new temporary object, where the dropped datapoints are replaced
# by nan values (by assigning a new column to the original dataframe)
#df_temp_2 = self.__class__(self.data.copy(),timedata_column=self.timename,
# experiment_tag=self.tag,time_unit=self.time_unit)
#df_temp_2.data[data_name] = df_temp.data[data_name]
#df_temp_2._update_time()
# Update the self.meta_valid dataframe, to contain False values for dropped
# datapoints. This is done by tracking the nan values in df_temp_2
#if data_name in self.meta_valid.columns:
# temp_1 = self.meta_valid[data_name].isin(['filtered'])
# temp_2 = pd.DataFrame(np.where(np.isnan(df_temp_2.data[data_name]),True,False))
# temp_3 = temp_1 | temp_2
# self.meta_valid[data_name] = np.where(temp_3,'filtered','original')
#else:
# self.meta_valid[data_name] = np.isnan(df_temp_2.data[data_name])
# self.meta_valid[data_name] = np.where(self.meta_valid[data_name],'filtered','original')
if plot == True:
self.plot_analysed(data_name)
if final:
if inplace:
self.data[data_name] = df_temp.data[data_name]
self._update_time()
elif not inplace:
return df_temp
if not final:
return None
def tag_extremes(self,data_name,arange=None,limit=0,method='below',
clear=False,plot=False):
"""
Tags values above or below a given limit.
Parameters
----------
data_name : str
name of the column containing the data to be tagged
arange : array of two values
the range within which extreme values need to be tagged
limit : int/float
limit below or above which values need to be tagged
method : 'below' or 'above'
below tags all the values below the given limit, above tags
the values above the limit
clear : bool
if True, the tags added before will be removed and put
back to 'original'.
plot : bool
whether or not to make a plot of the newly tagged data points
Returns
-------
None;
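Examples
--------
A sketch tagging negative values, assuming a HydroData object ``hd``;
the column name is an illustrative assumption:

>>> hd.tag_extremes('flow', limit=0, method='below')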
"""
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
if not data_name in self.meta_valid.columns:
# if the data_name column doesn't exist yet in the meta_valid dataset,
# add it
self.add_to_meta_valid([data_name])
if arange == None:
len_orig = len(self.data[data_name])
mask_valid = np.where(self.meta_valid[data_name] == 'filtered',True,False)
if method == 'below':
mask_tagging = np.where(self.data[data_name]<limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name] = np.where(mask,'filtered','original')
elif method == 'above':
mask_tagging = np.where(self.data[data_name]>limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name] = np.where(mask,'filtered','original')
else:
# check if arange has the right type
try:
len_orig = len(self.data[data_name][arange[0]:arange[1]])
mask_valid = np.where(self.meta_valid[data_name][arange[0]:arange[1]] == 'filtered',True,False)
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange "+\
"argument type " + str(type(arange[0])) + " or " +\
str(type(arange[1])) + ". Try changing the type "+\
"of the arange values to one compatible with " + \
str(type(self.data.index[0])) + " slicing.")
if method == 'below':
mask_tagging = np.where(self.data[data_name][arange[0]:arange[1]]<limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name][arange[0]:arange[1]] = np.where(mask,'filtered','original')
elif method == 'above':
mask_tagging = np.where(self.data[data_name][arange[0]:arange[1]]>limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name][arange[0]:arange[1]] = np.where(mask,'filtered','original')
len_new = len_orig - mask_tagging.sum()
_print_removed_output(len_orig,len_new,'tagging of extremes ('+method+')')
if plot == True:
self.plot_analysed(data_name)
def calc_slopes(self,xdata,ydata,time_unit=None,slope_range=None):
"""
Calculates slopes for given xdata and data_name; if a time unit is given as
an argument, the time values (xdata) will first be converted to this
unit, which will then be used to calculate the slopes with.
Parameters
----------
xdata : str
name of the column containing the xdata for slope calculation
(e.g. time). If 'index', the index is used as xdata. If datetime
objects, a time_unit is expected to calculate the slopes.
ydata : str
name of the column containing the data for slope calculation
time_unit : str
time unit to be used for the slope calculation (in case this is
based on time); if None, slopes are simply calculated based on the
values given
!! This value has no impact if the xdata column is the index and is
not a datetime type. If that is the case, it is assumed that the
user knows the unit of the xdata !!
Returns
-------
pd.Series
pandas Series object containing the slopes calculated for the
chosen variable
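Examples
--------
A sketch, assuming a HydroData object ``hd`` with a datetime index;
the column name is an illustrative assumption:

>>> slopes = hd.calc_slopes('index', 'flow', time_unit='hr')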
"""
slopes = pd.DataFrame()
if xdata == 'index':
self.data[xdata] = self.data.index
date_time = isinstance(self.data[xdata][0],np.datetime64) or \
isinstance(self.data[xdata][0],dt.datetime) or \
isinstance(self.data[xdata][0],pd.Timestamp)
if time_unit == None or date_time == False:
try:
slopes = self.data[ydata].diff() / self.data[xdata].diff()
self.time_unit = time_unit
except TypeError:
raise TypeError('Slope calculation cannot be executed, probably due to a \
non-handlable datatype. Either use the time_unit argument or \
use timedata of type np.datetime64, dt.datetime or pd.Timestamp.')
return None
elif time_unit == 'sec':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.seconds)
elif time_unit == 'min':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.seconds / 60)
elif time_unit == 'hr':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.seconds / 3600)
elif time_unit == 'd':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.days + \
self.data[xdata].diff().dt.seconds / 3600 / 24)
else :
raise ValueError('Could not calculate slopes. If you are using \
time-units to calculate slopes, please make sure you entered a \
valid time unit for slope calculation (sec, min, hr or d)')
if xdata == 'index':
self.data.drop(xdata,axis=1,inplace=True)
return slopes
def moving_slope_filter(self,xdata,data_name,cutoff,arange,time_unit=None,
clear=False,inplace=False,log_file=None,plot=False,
final=False):
"""
Filters out datapoints based on the difference between the slope in one
point and the next (sudden changes like noise get filtered out), based
on a given cut off value. Replaces the dropped values with NaN values.
Parameters
----------
xdata : str
name of the column containing the xdata for slope calculation
(e.g. time). If 'index', the index is used as xdata. If datetime
objects, a time_unit is expected to calculate the slopes.
data_name : str
name of the column containing the data that needs to be filtered
cutoff: int
the cutoff value to compare the slopes with to apply the filtering.
arange : array of two values
the range within which the moving slope filter needs to be applied
time_unit : str
time unit to be used for the slope calculation (in case this is
based on time); if None, slopes are calculated based on the values
given
clear : bool
if True, the tags added to datapoints before will be removed and put
back to 'original'.
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
log_file : str
string containing the directory to a log file to be written out
when using this function
plot : bool
if true, a plot is made, comparing the original dataset with the
new, filtered dataset
final : bool
if true, the values are actually replaced with nan values (either
inplace or in a new wwdata object)
Returns
-------
HydroData object (if inplace=False)
the object in which the filtered values of 'data_name' are replaced by NaN values
None (if inplace=True)
Creates
-------
A new column in the self.meta_valid dataframe, containing a mask indicating
what values are filtered
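Examples
--------
A sketch, assuming a HydroData object ``hd`` with a datetime index;
column name, cutoff and range are illustrative assumptions:

>>> hd.moving_slope_filter('index', 'flow', cutoff=5,
...                        arange=[hd.index()[0], hd.index()[-1]],
...                        time_unit='hr')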
"""
self._plot = 'valid'
try:
len_orig = self.data[data_name][arange[0]:arange[1]].count()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
#if plot == True:
# original = self.__class__(self.data.copy(),timedata_column=self.timename,
# experiment_tag=self.tag,time_unit=self.time_unit)
# Make temporary object for operations
df_temp = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
# Update the index of self.meta_valid
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
# Calculate slopes and drop values in temporary object
slopes = df_temp.calc_slopes(xdata,data_name,time_unit=time_unit)
if slopes is None:
return None
while abs(slopes).max() > cutoff:
df_temp.data[data_name] = df_temp.data[data_name].drop(slopes[abs(slopes) > cutoff].index)
slopes = df_temp.calc_slopes(xdata,data_name,time_unit=time_unit)
len_new = df_temp.data[data_name].count()
if log_file == None:
_print_removed_output(len_orig,len_new,'moving slope filter')
elif type(log_file) == str:
_log_removed_output(log_file,len_orig,len_new,'filtered')
else :
raise TypeError('Please provide the location of the log file as '+ \
'a string type, or leave the argument if no log '+ \
'file is needed.')
# Create new temporary object, where the dropped datapoints are replaced
# by nan values
df_temp_2 = self.__class__(self.data.copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
df_temp_2.data[data_name] = df_temp.data[data_name]
df_temp_2._update_time()
# Update the self.meta_valid dataframe, to contain False values for dropped
# datapoints and for datapoints already filtered. This is done by
# tracking the nan values in df_temp_2
if data_name in self.meta_valid.columns:
temp_1 = self.meta_valid[data_name].isin(['filtered'])
temp_2 = np.where(pd.isnull(df_temp_2.data[data_name]),True,False)
temp_3 = temp_1 | temp_2
self.meta_valid[data_name] = np.where(temp_3,'filtered','original')
else:
self.meta_valid[data_name] = pd.isnull(df_temp_2.data[data_name])
self.meta_valid[data_name] = np.where(self.meta_valid[data_name],'filtered','original')
if plot == True:
self.plot_analysed(data_name)
if final:
if inplace:
self.data[data_name] = df_temp_2.data[data_name]
self._update_time()
elif not inplace:
return df_temp_2
if not final:
return None
def simple_moving_average(self,arange,window,data_name=None,inplace=False,
plot=True):
"""
Calculate the Simple Moving Average of a dataseries from a dataframe,
using a window within which the datavalues are averaged.
Parameters
----------
arange : array of two values
the range within which the moving average needs to be calculated
window : int
the number of values from the dataset that are used to take the
average at the current point
data_name : str or array of str
name of the column(s) containing the data that needs to be
smoothened. If None, smoothened data is computed for the whole
dataframe. Defaults to None
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
plot : bool
if True, a plot is given for comparison between original and smooth
data
Returns
-------
HydroData (or subclass) object
either a new object (inplace=False) or an adjusted object, containing
the smoothened data values
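Examples
--------
A sketch smoothing one column over a 10-point window, assuming a
HydroData object ``hd``; names and range are illustrative assumptions:

>>> smooth = hd.simple_moving_average([hd.index()[0], hd.index()[-1]],
...                                   window=10, data_name='flow', plot=False)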
"""
try:
original = self.data[arange[0]:arange[1]].copy()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
if len(original) < window:
raise ValueError("Window width exceeds number of datapoints!")
if plot == True:
original = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
if inplace == False:
df_temp = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename, experiment_tag=self.tag,
time_unit=self.time_unit)
if data_name == None:
df_temp = self.data.rolling(window=window,center=True).mean()
elif isinstance(data_name,str):
df_temp.data[data_name] = self.data[data_name].interpolate().\
rolling(window=window,center=True).mean()
else:
for name in data_name:
df_temp.data[name] = self.data[name].interpolate().\
rolling(window=window,center=True).mean()
elif inplace == True:
if data_name == None:
self.data = self.data.rolling(window=window,center=True).mean()
elif isinstance(data_name,str):
self.data[data_name] = self.data[data_name].interpolate().\
rolling(window=window,center=True).mean()
else:
for name in data_name:
self.data[name] = self.data[name].interpolate().\
rolling(window=window,center=True).mean()
if plot == True:
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
ax.plot(original.time,original.data[data_name],'r--',label='original data')
if inplace == False:
ax.plot(df_temp.time,df_temp.data[data_name],'b-',label='averaged data')
elif inplace is True:
ax.plot(self.time,self.data[data_name],'b-',label='averaged data')
ax.legend(fontsize=16)
ax.set_xlabel(self.timename,fontsize=14)
ax.set_ylabel(data_name,fontsize=14)
ax.tick_params(labelsize=15)
if inplace == False:
return df_temp
def moving_average_filter(self,data_name,window,cutoff_frac,arange,clear=False,
inplace=False,log_file=None,plot=False,final=False):
"""
Filters out the peaks/outliers in a dataset by comparing its values to a
smoothened representation of the dataset (Moving Average Filtering). The
filtered values are replaced by NaN values.
Parameters
----------
data_name : str
name of the column containing the data that needs to be filtered
window : int
the number of values from the dataset that are used to take the
average at the current point.
cutoff_frac: float
the cutoff value (in fraction 0-1) to compare the data and smoothened
data: a deviation higher than a certain percentage drops the
datapoint.
arange : array of two values
the range within which the moving average filter needs to be applied
clear : bool
if True, the tags added to datapoints before will be removed and put
back to 'original'.
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
log_file : str
string containing the directory to a log file to be written out
when using this function
plot : bool
if true, a plot is made, comparing the original dataset with the
new, filtered dataset
final : bool
if true, the values are actually replaced with nan values (either
inplace or in a new wwdata object)
Returns
-------
HydroData object (if inplace=False)
the object in which the filtered values of 'data_name' are replaced by NaN values
None (if inplace=True)
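Examples
--------
A sketch, assuming a HydroData object ``hd``; column name, window and
cut-off fraction are illustrative assumptions:

>>> hd.moving_average_filter('flow', window=10, cutoff_frac=0.2,
...                          arange=[hd.index()[0], hd.index()[-1]])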
"""
self._plot = 'valid'
try:
len_orig = self.data[data_name][arange[0]:arange[1]].count()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
#if plot == True:
# original = self.__class__(self.data.copy(),timedata_column=self.timename,
# experiment_tag=self.tag,time_unit=self.time_unit)
# Make temporary object for operations
df_temp = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
# Make a hydropy object with the smoothened data
smooth_data = self.simple_moving_average(arange,window,data_name,inplace=False,
plot=False)
# Make a mask by comparing smooth and original data, using the given
# cut-off percentage
mask = (abs(smooth_data.data[data_name][arange[0]:arange[1]] - self.data[data_name][arange[0]:arange[1]])/\
smooth_data.data[data_name][arange[0]:arange[1]]) < cutoff_frac
# Update the index of self.meta_valid
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value=True)
# Do the actual filtering, based on the mask
df_temp.data[data_name] = df_temp.data[data_name].drop(df_temp.data[mask==False].index)
len_new = df_temp.data[data_name].count()
if log_file == None:
_print_removed_output(len_orig,len_new,'moving average filter')
elif type(log_file) == str:
_log_removed_output(log_file,len_orig,len_new,'filtered')
else :
raise TypeError('Please provide the location of the log file as \
a string type, or leave the argument if no log \
file is needed.')
# Create new temporary object, where the dropped datapoints are replaced
# by nan values (by assigning a new column to the original dataframe)
df_temp_2 = self.__class__(self.data.copy(),timedata_column=self.timename,
experiment_tag=self.tag,time_unit=self.time_unit)
df_temp_2.data[data_name] = df_temp.data[data_name]
df_temp_2._update_time()
# Update the self.meta_valid dataframe, to contain False values for dropped
# datapoints. This is done by tracking the nan values in df_temp_2
if data_name in self.meta_valid.columns:
temp_1 = self.meta_valid[data_name].isin(['filtered'])
temp_2 = np.where(pd.isnull(df_temp_2.data[data_name]),True,False)
temp_3 = temp_1 | temp_2
self.meta_valid[data_name] = np.where(temp_3,'filtered','original')
else:
self.meta_valid[data_name] = pd.isnull(df_temp_2.data[data_name])
self.meta_valid[data_name] = np.where(self.meta_valid[data_name],'filtered','original')
if plot:
self.plot_analysed(data_name)
if final:
if inplace:
self.data[data_name] = df_temp_2.data[data_name]
self._update_time()
elif not inplace:
return df_temp_2
if not final:
return None
def savgol(self,data_name,window=55,polyorder=2,plot=False,inplace=False):
"""
Uses the scipy.signal Savitzky-Golay filter to smoothen the data of a column;
The values are either replaced or a new dataframe is returned.
Parameters
----------
data_name : str
name of the column containing the data that needs to be filtered
window : int
the length of the filter window; defaults to 55
polyorder : int
the order of the polynomial used to fit the samples.
polyorder must be less than window; defaults to 2
plot : bool
if true, a plot is made, comparing the original dataset with the
new, filtered dataset
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
Returns
-------
HydroData object (if inplace=False)
None (if inplace=True)
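Examples
--------
A sketch, assuming a HydroData object ``hd``; the column name is an
illustrative assumption:

>>> smoothed = hd.savgol('flow', window=55, polyorder=2)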
"""
from scipy import signal
df_temp = self.__class__(self.data.copy(),timedata_column=self.timename,
experiment_tag=self.tag,time_unit=self.time_unit)
df_temp.data[data_name] = sp.signal.savgol_filter(self.data[data_name]\
,window,polyorder)
if plot:
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
ax.plot(self.time,self.data[data_name],'g--',label='original data')
ax.plot(self.time,df_temp.data[data_name],'b-',label='filtered data')
ax.legend(fontsize=16)
ax.set_xlabel(self.timename,fontsize=20)
ax.set_ylabel(data_name,fontsize=20)
ax.tick_params(labelsize=15)
if inplace:
self.data[data_name] = df_temp.data[data_name]
else:
return df_temp
#==============================================================================
# DATA (COR)RELATION
#==============================================================================
def calc_ratio(self,data_1,data_2,arange,only_checked=False):
"""
Given two datasets or -columns, calculates the average ratio between
the first and second dataset, within the given range. Also the standard
deviation on this is calculated
Parameters
----------
data_1 : str
name of the data column containing the data to be in the numerator
of the ratio calculation
data_2 : str
name of the data column containing the data to be in the denominator
of the ratio calculation
arange : array of two values
the range within which the ratio needs to be calculated
only_checked : bool
if 'True', filtered values are excluded; default to 'False'
Returns
-------
The average ratio of the first data column over the second one within
the given range and including the standard deviation
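Examples
--------
A sketch, assuming a HydroData object ``hd``; column names and range
are illustrative assumptions:

>>> avg, std = hd.calc_ratio('flow_A', 'flow_B',
...                          [hd.index()[0], hd.index()[-1]])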
"""
# If indexes are in datetime format, convert the arange array to date-
# time values
#if isinstance(self.data.index[0],pd.Timestamp):
# arange = [(self.data.index[0] + dt.timedelta(arange[0]-1)),
# (self.data.index[0] + dt.timedelta(arange[1]-1))]
try:
self.data.loc[arange[0]:arange[1]]
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
mean = (self.data[data_1]/self.data[data_2])[arange[0]:arange[1]]
if arange[0] < self.index()[0] or arange[1] > self.index()[-1]:
raise IndexError('Index out of bounds. Check whether the values of ' + \
'"arange" are within the index range of the data.')
if only_checked == True:
#create new pd.Dataframes for original values in range,
#merge only rows in which both values are original
data_1_checked = pd.DataFrame(self.data[arange[0]:arange[1]][data_1][self.meta_valid[data_1]=='original'].values,
index=self.data[arange[0]:arange[1]][data_1][self.meta_valid[data_1]=='original'].index)
data_2_checked = pd.DataFrame(self.data[arange[0]:arange[1]][data_2][self.meta_valid[data_2]=='original'].values, \
index=self.data[data_2][arange[0]:arange[1]][self.meta_valid[data_2]=='original'].index)
ratio_data = pd.merge(data_1_checked,data_2_checked,left_index=True, right_index=True, how = 'inner')
ratio_data.columns = data_1,data_2
mean = (ratio_data[data_1]/ratio_data[data_2])\
.replace(np.inf,np.nan).mean()
std = (ratio_data[data_1]/ratio_data[data_2])\
.replace(np.inf,np.nan).std()
else:
mean = (self.data[arange[0]:arange[1]][data_1]/self.data[arange[0]:arange[1]][data_2])\
.replace(np.inf,np.nan).mean()
std = (self.data[arange[0]:arange[1]][data_1]/self.data[arange[0]:arange[1]][data_2])\
.replace(np.inf,np.nan).std()
#print('mean : '+str(mean)+ '\n' +'standard deviation : '+str(std))
return mean,std
def compare_ratio(self,data_1,data_2,arange,only_checked=False):
"""
Compares the average ratios of two datasets in multiple different ranges
and returns the most reliable one, based on the standard deviation on
the ratio values
Parameters
----------
data_1 : str
name of the data column containing the data to be in the numerator
of the ratio calculation
data_2 : str
name of the data column containing the data to be in the denominator
of the ratio calculation
arange : int
the range (in days) for which the ratios need to be calculated and
compared
only_checked : bool
if 'True', filtered values are excluded; default to 'False'
Returns
-------
The average ratio within the range that has been found to be the most
reliable one
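Examples
--------
A sketch comparing ratios over consecutive 3-day windows, assuming a
HydroData object ``hd``; the column names are illustrative assumptions:

>>> avg, std = hd.compare_ratio('flow_A', 'flow_B', 3)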
"""
# Make the array with ranges within which to compute ratios, based on
# arange, indicating what the interval should be.
if isinstance(self.data.index[0],pd.Timestamp):
days = [self.index()[0] + dt.timedelta(arange) * x for x in \
range(0, int((self.index()[-1]-self.index()[0]).days/arange))]
starts = [[y] for y in days]
ends = [[x + dt.timedelta(arange)] for x in days]
#end = (self.data.index[-1] - self.data.index[0]).days+1
elif isinstance(self.data.index[0],float):
end = int(self.index()[-1]+1) # +1 because int rounds downwards
starts = [[y] for y in range(0,end)]
ends = [[x] for x in range(arange,end+arange)]
ranges = np.append(starts,ends,1)
rel_std = np.inf
for r in range(0,len(ranges)):
average,stdev = self.calc_ratio(data_1,data_2,ranges[r],only_checked)
try:
relative_std = stdev/average
if relative_std < rel_std:
std = stdev
avg = average
index = r
rel_std = std/avg
except (ZeroDivisionError):
pass
print('Best ratio (' + str(avg) + ' ± ' + str(std) + \
') was found in the range: ' + str(ranges[index]))
return avg,std
def get_correlation(self,data_1,data_2,arange,zero_intercept=False,
only_checked=False,plot=False):
"""
Calculates the linear regression coefficients that relate data_1 to
data_2
Parameters
----------
data_1 and data_2 : str
names of the data columns containing the data between which the
correlation will be calculated. data_1: independent data; data_2:
dependent data
arange : array
array containing the beginning and end value between which the
correlation needs to be calculated
zero_intercept : bool
indicates whether or not to assume a zero-intercept
only_checked: bool
if 'True', filtered values are excluded from calculation and plotting;
default to 'False'
if a value in one column is filtered, the corresponding value in the second
column also gets excluded!
Returns
-------
the linear regression coefficients of the correlation, as well as the
r-squared value
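Examples
--------
A sketch, assuming a HydroData object ``hd``; column names and range
are illustrative assumptions:

>>> slope, intercept, r_sq = hd.get_correlation('conductivity', 'flow',
...                                             [hd.index()[0], hd.index()[-1]])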
"""
# If indexes are in datetime format, and arange values are not,
# convert the arange array to datetime values
if isinstance(self.data.index[0],pd.Timestamp) and \
isinstance(arange[0],int) or isinstance(arange[0],float):
wn.warn('Replacing arange values, assumed to be relative time' + \
' values, with absolute values of type dt.datetime')
arange = [(self.data.index[0] + dt.timedelta(arange[0]-1)),
(self.data.index[0] + dt.timedelta(arange[1]-1))]
#if arange[0] < self.time[0] or arange[1] > self.time[-1]:
# raise IndexError('Index out of bounds. Check whether the values of '+ \
# '"arange" are within the index range of the data.')
self.data = self.data.sort_index()
if only_checked:
#create new pd.Dataframes for original values in range,
#merge only rows in which both values are original
data_1_checked = pd.DataFrame(self.data[data_1][arange[0]:arange[1]][self.meta_valid[data_1]=='original'].values,
index=self.data[data_1][arange[0]:arange[1]][self.meta_valid[data_1]=='original'].index)
data_2_checked = pd.DataFrame(self.data[data_2][arange[0]:arange[1]][self.meta_valid[data_2]=='original'].values,
index=self.data[data_2][arange[0]:arange[1]][self.meta_valid[data_2]=='original'].index)
corr_data = pd.merge(data_1_checked,data_2_checked,left_index=True, right_index=True, how = 'inner')
else:
corr_data = pd.DataFrame(self.data[[data_1,data_2]][arange[0]:arange[1]])
corr_data.columns = data_1,data_2
corr_data = corr_data[[data_1,data_2]].dropna()
import statsmodels.api as sm
X = corr_data[data_1]
Y = corr_data[data_2]
if zero_intercept == False:
X = sm.add_constant(X)
model = sm.OLS(Y,X)
results = model.fit()
slope = results.params[data_1]
r_sq = results.rsquared
if zero_intercept:
intercept = 0
else:
intercept = results.params['const']
if plot:
x = corr_data[data_1].copy().sort_values(inplace=False)
#x = np.arange(self.data[data_2][arange[0]:arange[1]].min(),
# self.data[data_2][arange[0]:arange[1]].max())
#y = slope * x + intercept
if zero_intercept:
y = results.predict(x)
exog = x
else:
x2 = sm.add_constant(x)
y = results.predict(x2)
exog = x2
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
# plot data
ax.plot(corr_data[data_1],corr_data[data_2],'o',markerfacecolor=None,
markeredgewidth=1,markeredgecolor='b',markersize=4,label='Data')
# plot predictions
ax.plot(x,y,'k',label='Linear fit')
# plot prediction intervals
from statsmodels.stats.outliers_influence import summary_table
st, data, ss2 = summary_table(results, alpha=0.05)
lower = data[:,6]
lower.sort()
upper = data[:,7]
upper.sort()
ax.fill_between(x.astype(float), lower, upper, color='k', alpha=0.2,
label='Prediction interval (95%)')
ax.legend(fontsize=15)
ax.tick_params(labelsize=15)
ax.set_ylabel(data_2,size=17)
ax.set_xlabel(data_1,size=17)
fig.tight_layout()
print('slope: ' + str(slope) + ' intercept: ' + str(intercept) + ' R2: ' + str(r_sq))
return fig, ax
return slope,intercept,r_sq
#==============================================================================
# DAILY PROFILE CALCULATION
#==============================================================================
def calc_daily_profile(self,column_name,arange,quantile=0.9,plot=False,
plot_method='quantile',clear=False,only_checked=False):
"""
Calculates a typical daily profile based on data from the indicated
consecutive days. Also saves this average day, along with standard
deviation and lower and upper percentiles as given in the arguments.
Plotting is possible.
Parameters
----------
column_name : str
name of the column containing the data to calculate an average day
for
arange : 2-element array of ints
contains the beginning and end day of the period to use for average
day calculation
quantile : float between 0 and 1
value to use for the calculation of the quantiles
plot : bool
plot or not
plot_method : str
method to use for plotting. Available: "quantile" or "stdev"
clear : bool
whether or not to clear the key in the self.daily_profile dictionary
that is already present
Returns
-------
None
creates a dictionary self.daily_profile containing information
on the average day as calculated.
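Examples
--------
A sketch computing a typical day from two weeks of data, assuming a
HydroData object ``hd`` with a datetime index; column name and range
are illustrative assumptions (get_highs is run first so rain events
can be detected):

>>> import datetime as dt
>>> hd.get_highs('flow', 0.95, [hd.index()[0], hd.index()[-1]])
>>> hd.calc_daily_profile('flow', [hd.index()[0],
...                                hd.index()[0] + dt.timedelta(14)])
>>> hd.daily_profile['flow'].head()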
"""
# several checks to make sure the right types, columns... are used
try:
if not isinstance(self.daily_profile,dict):
self.daily_profile = {}
except AttributeError:
self.daily_profile = {}
if clear:
try:
self.daily_profile.pop(column_name, None)
except KeyError:
pass
if column_name in self.daily_profile.keys():
raise KeyError('self.daily_profile dictionary already contains a ' +\
'key ' + column_name + '. Set argument "clear" to True to erase the ' + \
'key and create a new one.')
# Give warning when replacing data from rain events and at the same time
# check if arange has the right type
try:
rain = (self.data_type == 'WWTP') and \
(self.highs['highs'].loc[arange[0]:arange[1]].sum() > 1)
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
except AttributeError:
raise AttributeError('OnlineSensorBased instance has no attribute "highs". '\
'run .get_highs to tag the peaks in the dataset.')
if rain :
wn.warn("Data points obtained during a rain event will be used for" \
" the calculation of an average day. This might lead to a not-" \
"representative average day and/or high standard deviations.")
daily_profile = pd.DataFrame()
if not isinstance(arange[0],int) and not isinstance(arange[0],dt.datetime):
raise TypeError('The values of arange must be of type int or dt.datetime')
if isinstance(self.data.index[0],dt.datetime):
range_days = pd.date_range(arange[0],arange[1])
indexes = [self.data.index[0],self.data.index[0]+dt.timedelta(1)]
else :
range_days = range(arange[0],arange[1])
indexes = [0,1]
#if isinstance(arange[0],dt.datetime):
# range_days = pd.date_range(arange[0],arange[1])
#if only_checked:
# for i in range_days:
# daily_profile = pd.merge(daily_profile,
# pd.DataFrame(self.data[column_name][i:i+1]\
# [self.meta_valid[column_name]=='original'].values),
# left_index=True, right_index=True,how='outer')
# mean_day = pd.DataFrame(index=daily_profile.index)
# self.data.loc[indexes[0]:indexes[1]].index)#\
# [self.meta_valid[column_name]=='original'].index)
# if isinstance(self.data.index[0],dt.datetime):
# mean_day.index = mean_day.index.time
#else:
if only_checked and column_name in self.meta_valid:
for i in range_days:
if isinstance(i,dt.datetime) or isinstance(i,np.datetime64) or isinstance(i,pd.Timestamp):
name = str(i.month) + '-' + str(i.day)
else:
name = str(i)
mask_valid = pd.DataFrame((self.meta_valid[column_name][i:i+1] == 'original').values,columns=[name])
daily_profile = pd.merge(daily_profile,
pd.DataFrame(self.data[column_name][i:i+1].values,
columns=[name]).where(mask_valid),
left_index=True, right_index=True,how='outer')
else:
if only_checked:
wn.warn('No values of selected column were filtered yet. All values '+ \
'will be displayed.')
for i in range_days:
if isinstance(i,dt.datetime) or isinstance(i,np.datetime64) or isinstance(i,pd.Timestamp):
name = str(i.month) + '-' + str(i.day)
else:
name = str(i)
daily_profile = pd.merge(daily_profile,
pd.DataFrame(self.data[column_name][i:i+1*i.freq].values,
columns=[name]),
left_index=True, right_index=True,how='outer')
daily_profile['index'] = self.data.loc[indexes[0]:indexes[1]].index.time
daily_profile = daily_profile.drop_duplicates(subset='index', keep='first')\
.set_index('index').sort_index()
mean_day = pd.DataFrame(index=daily_profile.index.values)
mean_day['avg'] = daily_profile.mean(axis=1).values
mean_day['std'] = daily_profile.std(axis=1).values
mean_day['Qupper'] = daily_profile.quantile(quantile,axis=1).values
mean_day['Qlower'] = daily_profile.quantile(1-quantile,axis=1).values
self.daily_profile[column_name] = mean_day
if plot:
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
ax.plot(mean_day.index,mean_day['avg'],'g')
if plot_method == 'quantile':
ax.plot(mean_day.index,mean_day['Qupper'],'b',alpha=0.5)
ax.plot(mean_day.index,mean_day['Qlower'],'b',alpha=0.5)
ax.fill_between(mean_day.index,mean_day['avg'],mean_day['Qupper'],
color='grey', alpha=0.3)
ax.fill_between(mean_day.index,mean_day['avg'],mean_day['Qlower'],
color='grey', alpha=0.3)
elif plot_method == 'stdev':
ax.plot(mean_day.index,mean_day['avg']+mean_day['std'],'b',alpha=0.5)
ax.plot(mean_day.index,mean_day['avg']-mean_day['std'],'b',alpha=0.5)
ax.fill_between(mean_day.index,mean_day['avg'],
mean_day['avg']+mean_day['std'],
color='grey', alpha=0.3)
ax.fill_between(mean_day.index,mean_day['avg'],
mean_day['avg']-mean_day['std'],
color='grey', alpha=0.3)
ax.tick_params(labelsize=15)
ax.set_xlim(mean_day.index[0],mean_day.index[-1])
ax.set_ylabel(column_name,size=17)
ax.set_xlabel('Time',size=17)
return fig,ax
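    # Note on the result stored above (column name is hypothetical): after a
    # call for e.g. 'conductivity', self.daily_profile['conductivity'] holds
    # one row per time of day with the columns 'avg', 'std', 'Qupper' and
    # 'Qlower', so a quick look at the average daily pattern is simply:
    #   self.daily_profile['conductivity']['avg'].plot()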
##############
### PLOTTING
##############
def plot_analysed(self,data_name,time_range='default',only_checked = False):
"""
plots the values and their types (original, filtered, filled) \
of a given column in the given time range.
Parameters
----------
data_name : str
name of the column containing the data to plot
time_range : array of two values
the range within which the values are plotted; default is all
only_checked : bool
            if 'True', filtered values are excluded; defaults to 'False'
Returns
-------
Plot
"""
# time range settings
if time_range == 'default':
if isinstance(self.time[0],float):
time_range = [int(self.time[0]),int(self.time[-1])+1]
elif isinstance(self.time[0],dt.datetime):
time_range = [self.time[0],self.time[-1]]
else:
if not isinstance(time_range[0],type(self.time[0])) or not \
isinstance(time_range[1],type(self.time[-1])):
raise TypeError('The value type of the values in time_range must ' + \
'be the same as the value type of index values')
if time_range[0] < self.time[0] or time_range[1] > int(self.time[-1]):
raise IndexError('Index out of bounds. Check whether the values of '+\
'"time_range" are within the index range of the data.')
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
#create new object with only the values within the given time range
df = self.__class__(self.data[time_range[0]:time_range[1]].copy(),timedata_column=self.timename,
experiment_tag=self.tag,time_unit=self.time_unit)
if self._plot == 'filled':
df.meta_filled = self.meta_filled[time_range[0]:time_range[1]].copy()
df.filled = self.filled[time_range[0]:time_range[1]].copy()
ax.plot(df.time[df.meta_filled[data_name]=='original'],
df.filled[data_name][df.meta_filled[data_name]=='original'],
'.g',label='original')
if only_checked == False:
if (df.meta_filled[data_name]=='filtered').any():
ax.plot(df.time[df.meta_filled[data_name]=='filtered'],
df.data[data_name][df.meta_filled[data_name]=='filtered'],
'.r',label='filtered')
if (df.meta_filled[data_name]=='filled_interpol').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_interpol'],
df.filled[data_name][df.meta_filled[data_name]=='filled_interpol'],
'.b',label='filled (interpolation)')
if (df.meta_filled[data_name]=='filled_ratio').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_ratio'],
df.filled[data_name][df.meta_filled[data_name]=='filled_ratio'],
'.m',label='filled (ratio-based)')
if (df.meta_filled[data_name]=='filled_correlation').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_correlation'],
df.filled[data_name][df.meta_filled[data_name]=='filled_correlation'],
'.k',label='filled (correlation-based)')
if (df.meta_filled[data_name]=='filled_average_profile').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_average_profile'],
df.filled[data_name][df.meta_filled[data_name]=='filled_average_profile'],
'.y',label='filled (typical day)')
if (df.meta_filled[data_name]=='filled_infl_model').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_infl_model'],
df.filled[data_name][df.meta_filled[data_name]=='filled_infl_model'],
'.c',label='filled (influent model)')
if (df.meta_filled[data_name]=='filled_profile_day_before').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_profile_day_before'],
df.filled[data_name][df.meta_filled[data_name]=='filled_profile_day_before'],
'.',label='filled (previous day)')
#if (df.meta_filled[data_name]=='filled_savitzky_golay').any():
# ax.plot(df.time[df.meta_filled[data_name]=='filled_savitzky_golay'],
# df.filled[data_name][df.meta_filled[data_name]=='filled_savitzky_golay'],
# '.m',label='filled (Savitzky-Golay filter)')
elif self._plot == 'valid':
df.meta_valid = self.meta_valid[time_range[0]:time_range[1]].copy()
ax.plot(df.time[self.meta_valid[data_name]=='original'],
df.data[data_name][df.meta_valid[data_name]=='original'],
'.g',label='original')
if only_checked == False:
if (df.meta_valid[data_name]=='filtered').any():
if data_name in df.filled.columns:
ax.plot(df.time[df.meta_valid[data_name]=='filtered'],
df.filled[data_name][df.meta_valid[data_name]=='filtered'],
'.r',label='filtered')
else:
ax.plot(df.time[df.meta_valid[data_name]=='filtered'],
df.data[data_name][df.meta_valid[data_name]=='filtered'],
'.r',label='filtered')
print (str(float(df.meta_valid.groupby(data_name).size()['original']*100)/ \
float(df.meta_valid[data_name].count())) + \
'% datapoints are left over from the original ' + \
str(float(df.meta_valid[data_name].count())))
ax.legend(bbox_to_anchor=(1.05,1),loc=2,fontsize=16)
ax.set_xlabel(self.timename,fontsize=20)
ax.set_xlim(time_range[0],time_range[1])
ax.set_ylabel(data_name,fontsize=20)
ax.tick_params(labelsize=14)
return fig, ax
# def plot_analysed(self,data_name):
# """
#
# """
# fig = plt.figure(figsize=(16,6))
# ax = fig.add_subplot(111)
#
# if not self._plot == 'filled' or self._plot == 'valid':
# ValueError('No filtering or filling of the current dataset has been done.\
# Run any filter or filling function to start the data analysis.')
#
# if self._plot == 'filled':
# ax.plot(self.time[self.meta_filled[data_name]=='original'],
# self.data[data_name][self.meta_filled[data_name]=='original'],
# '.g',label='original')
# if (self.meta_filled[data_name]=='filtered').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filtered'],
# self.data[data_name][self.meta_filled[data_name]=='filtered'],
# '.r',label='filtered')
# if (self.meta_filled[data_name]=='filled_interpol').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_interpol'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_interpol'],
# '.b',label='filled (interpolation)')
# if (self.meta_filled[data_name]=='filled_ratio').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_ratio'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_ratio'],
# '.m',label='filled (ratio-based)')
# if (self.meta_filled[data_name]=='filled_correlation').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_correlation'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_correlation'],
# '.k',label='filled (correlation-based)')
# if (self.meta_filled[data_name]=='filled_average_profile').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_average_profile'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_average_profile'],
# '.y',label='filled (typical day)')
# if (self.meta_filled[data_name]=='filled_infl_model').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_infl_model'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_infl_model'],
# '.c',label='filled (influent model)')
#
# elif self._plot == 'valid':
# ax.plot(self.time[self.meta_valid[data_name]=='original'],
# self.data[data_name][self.meta_valid[data_name]=='original'],
# '.g',label='original')
# if (self.meta_valid[data_name]=='filtered').any():
# if data_name in self.filled.columns:
# ax.plot(self.time[self.meta_valid[data_name]=='filtered'],
# self.filled[data_name][self.meta_valid[data_name]=='filtered'],
# '.r',label='filtered')
# else:
# ax.plot(self.time[self.meta_valid[data_name]=='filtered'],
# self.data[data_name][self.meta_valid[data_name]=='filtered'],
# '.r',label='filtered')
#
# ax.legend(fontsize=16)
# ax.set_xlabel(self.timename,fontsize=14)
# ax.set_ylabel(data_name,fontsize=14)
# ax.tick_params(labelsize=14)
#
# print str(float(self.meta_valid.groupby(data_name).size()['original']*100)/ \
# float(self.meta_valid[data_name].count())) + \
# '% datapoints are left over from the original ' + \
# str(float(self.meta_valid[data_name].count()))
# return fig, ax
##############################
### NON-CLASS FUNCTIONS ###
##############################
def total_seconds(timedelta_value):
return timedelta_value.total_seconds()
def _print_removed_output(original,new,function):
"""
function printing the output of functions that tag datapoints.
Parameters
----------
original : int
original length of the dataset
new : int
length of the new dataset
function : str
info on the function used to filter the data
"""
print(str(original-new) + ' values detected and tagged as filtered by function ' + function)
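# e.g. _print_removed_output(1440, 1380, 'tag_nan') would print
# "60 values detected and tagged as filtered by function tag_nan"
# (illustrative call; the function name passed here is hypothetical)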
def _log_removed_output(log_file,original,new,type_):
"""
function writing the output of functions that remove datapoints to a log file.
Parameters
----------
log_file : str
string containing the directory to the log file to be written out
original : int
original length of the dataset
new : int
length of the new dataset
type_ : str
'removed' or 'dropped'
"""
log_file = open(log_file,'a')
    log_file.write('\nOriginal dataset: '+str(original)+' datapoints; new dataset: '+
                   str(new)+' datapoints; '+str(original-new)+' datapoints '+type_)
log_file.close()
# Prepends a WEST-header to read-in text files, to make them WEST compatible
def _prepend_WEST_header(filepath,sep,column_names,outputfilename,
comment='no comments'):
"""
"""
f = open(filepath,'r')
columns = f.readlines()
temp = f.readlines()[1:]
f.close()
f = open(outputfilename, 'w')
#f.write("%%Version3.3\ %%BeginComment\ ")
#f.write(comment)
#f.write("%%EndComment\ %%BeginHeader\ ")
#f.write(str())#write the names
#f.write(str())#write the units
f.write(temp)
f.close()
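# Illustrative call of _prepend_WEST_header (file and column names are
# hypothetical):
#   _prepend_WEST_header('influent.txt', '\t', ['time', 'flow'],
#                        'influent_west.txt', comment='sensor data')
# Note that the WEST header lines themselves are still commented out above,
# so the current sketch only copies the data lines to the new file.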
| agpl-3.0 | -375,071,956,958,271,900 | 42.792012 | 164 | 0.55373 | false |
54lihaoxin/leetcode_python | src/StringToInteger/solution.py | 1 | 2389 | # String to Integer (atoi)
#
# Implement atoi to convert a string to an integer.
#
# Hint: Carefully consider all possible input cases. If you want a challenge, please do not see below and ask yourself what are the possible input cases.
#
# Notes: It is intended for this problem to be specified vaguely (ie, no given input specs). You are responsible to gather all the input requirements up front.
#
# spoilers alert... click to show requirements for atoi.
#
# Requirements for atoi:
# The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
#
# The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
#
# If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
#
# If no valid conversion could be performed, a zero value is returned. If the correct value is out of the range of representable values, INT_MAX (2147483647) or INT_MIN (-2147483648) is returned.
debug = True
debug = False
from CommonClasses import *
class Solution:
# @return an integer
def atoi(self, str):
if str == None or len(str) == 0:
return 0
newStr = ''
sign = 1
        while len(str) > 0 and str[0] == ' ':
str = str[1:]
if len(str) > 1 and str[0] in '-+':
if str[0] == '-':
sign = -1
str = str[1:]
if len(str) == 0:
return 0
for c in str:
if c in '1234567890':
newStr += c
else:
break
if len(newStr) == 0:
return 0
if sign == 1:
# hxl: OJ doesn't allow sys.maxint... so hard code max of int as 2147483647 here
return min(2147483647, long(newStr))
else:
return max(-2147483648, -long(newStr))
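# Illustrative behaviour of atoi (added note, Python 2 style as used by the OJ):
#   Solution().atoi("   -42abc")    # -> -42
#   Solution().atoi("91283472332")  # -> 2147483647 (clamped to INT_MAX)
#   Solution().atoi("+-2")          # -> 0 (no valid leading integer)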
| apache-2.0 | -8,124,587,870,390,814,000 | 35.359375 | 296 | 0.60653 | false |
coati-00/nepi | terrain.py | 1 | 5509 | # -*- coding: utf-8 -*-
from django.test import client
from lettuce import before, after, world, step
from lettuce.django import django_url
import os
import time
from django.conf import settings
try:
from lxml import html
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
except:
pass
def skip_selenium():
return (os.environ.get('LETTUCE_SKIP_SELENIUM', False)
or (hasattr(settings, 'LETTUCE_SKIP_SELENIUM')
and settings.LETTUCE_SKIP_SELENIUM))
@before.harvest
def setup_browser(variables):
world.using_selenium = False
if skip_selenium():
world.browser = None
world.skipping = False
else:
ff_profile = FirefoxProfile()
ff_profile.set_preference("webdriver_enable_native_events", False)
world.firefox = webdriver.Firefox(ff_profile)
world.using_selenium = False
world.client = client.Client()
@after.harvest
def teardown_browser(total):
if not skip_selenium():
world.firefox.quit()
@before.harvest
def setup_database(_foo):
# make sure we have a fresh test database
os.system("rm -f lettuce.db")
os.system("cp test_data/test.db lettuce.db")
@after.harvest
def teardown_database(_foo):
os.system("rm -f lettuce.db")
@before.each_scenario
def clear_data(_foo):
pass
@step(u'Using selenium')
def using_selenium(step):
if skip_selenium():
world.skipping = True
else:
world.using_selenium = True
@step(u'Finished using selenium')
def finished_selenium(step):
if skip_selenium():
world.skipping = False
else:
world.using_selenium = False
@before.each_scenario
def clear_selenium(step):
world.using_selenium = False
@step(r'I access the url "(.*)"')
def access_url(step, url):
if world.using_selenium:
world.firefox.get(django_url(url))
else:
response = world.client.get(django_url(url))
world.dom = html.fromstring(response.content)
@step(u'I am not logged in')
def i_am_not_logged_in(step):
if world.using_selenium:
world.firefox.get(django_url("/accounts/logout/"))
else:
world.client.logout()
@step(u'I am taken to a login screen')
def i_am_taken_to_a_login_screen(step):
assert len(world.response.redirect_chain) > 0
(url, status) = world.response.redirect_chain[0]
assert status == 302, status
assert "/login/" in url, "URL redirected to was %s" % url
@step(u'there is not an? "([^"]*)" link')
def there_is_not_a_link(step, text):
found = False
for a in world.dom.cssselect("a"):
if a.text and a.text.strip() == text:
found = True
assert not found
@step(u'there is an? "([^"]*)" link')
def there_is_a_link(step, text):
found = False
for a in world.dom.cssselect("a"):
if a.text and a.text.strip() == text:
found = True
assert found
@step(u'I click the "([^"]*)" link')
def i_click_the_link(step, text):
if not world.using_selenium:
for a in world.dom.cssselect("a"):
if a.text:
if text.strip().lower() in a.text.strip().lower():
href = a.attrib['href']
response = world.client.get(django_url(href))
world.dom = html.fromstring(response.content)
return
assert False, "could not find the '%s' link" % text
else:
try:
link = world.firefox.find_element_by_partial_link_text(text)
assert link.is_displayed()
link.click()
except:
try:
time.sleep(1)
link = world.firefox.find_element_by_partial_link_text(text)
assert link.is_displayed()
link.click()
except:
world.firefox.get_screenshot_as_file("/tmp/selenium.png")
assert False, link.location
@step(u'I fill in "([^"]*)" in the "([^"]*)" form field')
def i_fill_in_the_form_field(step, value, field_name):
# note: relies on input having id set, not just name
if not world.using_selenium:
assert False, \
"this step needs to be implemented for the django test client"
world.firefox.find_element_by_id(field_name).send_keys(value)
@step(u'I submit the "([^"]*)" form')
def i_submit_the_form(step, id):
if not world.using_selenium:
assert False, \
"this step needs to be implemented for the django test client"
world.firefox.find_element_by_id(id).submit()
@step('I go back')
def i_go_back(self):
""" need to back out of games currently"""
if not world.using_selenium:
assert False, \
"this step needs to be implemented for the django test client"
world.firefox.back()
@step(u'I wait for (\d+) seconds')
def wait(step, seconds):
time.sleep(int(seconds))
@step(r'I see the header "(.*)"')
def see_header(step, text):
if world.using_selenium:
assert text.strip() == world.firefox.find_element_by_css_selector(
".hero-unit>h1").text.strip()
else:
# header = world.dom.cssselect('h1')[0]
header = world.dom.cssselect('.hero-unit>h1')[0]
assert text.strip() == header.text_content().strip()
@step(r'I see the page title "(.*)"')
def see_title(step, text):
if world.using_selenium:
assert text == world.firefox.title
else:
assert text == world.dom.find(".//title").text
| gpl-2.0 | 2,688,747,096,823,511,600 | 26.545 | 76 | 0.615901 | false |
iw3hxn/LibrERP | crm_lead_correct/models/res_partner_category.py | 1 | 1909 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import orm
from tools.translate import _
class ResPartnerCategory(orm.Model):
_inherit = 'res.partner.category'
_order = "name"
def create(self, cr, uid, values, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
if context.get('no_create', False):
raise orm.except_orm(
'Errore',
_('It is not allowed to create category from here'))
return super(ResPartnerCategory, self).create(cr, uid, values, context)
| agpl-3.0 | 1,621,162,907,417,058,000 | 42.386364 | 79 | 0.655317 | false |
jslvtr/AC41004-Team-2 | tests/unit/test_event.py | 1 | 3539 | from unittest import TestCase
import uuid
from src.common.database import Database
from src.models.event import Event, NoSuchEventExistException
from datetime import datetime
import os
__author__ = 'stamas01'
mongodb_user = os.environ.get("MONGODB_USER")
mongodb_password = os.environ.get("MONGODB_PASSWORD")
mongo_url = os.environ.get("MONGODB_URL")
mongo_port = os.environ.get("MONGODB_PORT")
mongo_database = os.environ.get("MONGODB_DATABASE")
class TestEvent(TestCase):
def setUp(self):
Database.initialize(mongodb_user, mongodb_password, mongo_url, int(mongo_port), mongo_database)
def test_factory_form_json(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(),datetime.now())
self.assertEqual(event, Event.factory_form_json(event.to_json()), "Creating event object from json failed")
def test_save_to_db(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(),datetime.now())
event.save_to_db()
try:
test_result = Event.get_by_id(event.get_id())
except NoSuchEventExistException:
self.fail()
event.remove_from_db()
self.assertEqual(test_result, event, "Saved and retrieved event is not the same")
def test_remove_from_db(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(),datetime.now())
event.save_to_db()
try:
event.remove_from_db()
except NoSuchEventExistException:
self.fail("Error occurred when tried to delete existing event")
def test_remove_non_existing_event_from_db(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(),datetime.now(), uuid.uuid4())
        self.assertRaises(NoSuchEventExistException, event.remove_from_db)
def test_not_synced(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(),datetime.now())
event.save_to_db()
event.set_title("TestUpdated")
self.assertFalse(event.is_synced(),"event marked synced when it is not")
event.remove_from_db()
def test_is_synced(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(), datetime.now())
event.save_to_db()
event.set_title("TestUpdated")
event.sync_to_db()
self.assertTrue(event.is_synced(), "event marked un-synced when it is")
event.remove_from_db()
def test_is_valid_model(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(), datetime.now(), uuid.uuid4())
self.assertTrue(event.is_valid_model(),"Valid model is invalid")
def test_is_not_valid_model(self):
event = Event(12, 12, "virtual", 10, "hello","sd")
self.assertFalse(event.is_valid_model(),"Invalid model is valid")
def test_sync_to_db(self):
event = Event("Test", "lk", "virtual", 10, datetime.now(), datetime.now())
event.save_to_db()
event.set_title("TestUpdated")
event.sync_to_db()
try:
test_result = Event.get_by_id(event.get_id())
except NoSuchEventExistException:
self.fail()
event.remove_from_db()
self.assertEqual(test_result, event, "Sync event with database failed")
def test_to_json(self):
dt = datetime.now()
id_ = uuid.uuid4()
event = Event("Test", "lk", "virtual", 10, dt,dt,id_)
self.equal = self.assertEqual(event.to_json(), {'title': 'Test', 'description': 'lk', "event_type": "virtual", "points": 10, 'start': dt, 'end': dt, '_id': id_})
| gpl-2.0 | 71,280,100,317,924,000 | 39.678161 | 169 | 0.628426 | false |
foer/linuxmuster-client-unity | tests/autopilot/unity/emulators/panel.py | 1 | 11333 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
# Author: Marco Trevisan (Treviño)
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
from __future__ import absolute_import
import logging
from time import sleep
from autopilot.input import Mouse
from autopilot.keybindings import KeybindingsHelper
from unity.emulators import UnityIntrospectionObject
logger = logging.getLogger(__name__)
class PanelController(UnityIntrospectionObject):
"""The PanelController class."""
def get_panel_for_monitor(self, monitor_num):
"""Return an instance of panel for the specified monitor, or None."""
panels = self.get_children_by_type(UnityPanel, monitor=monitor_num)
assert(len(panels) == 1)
return panels[0]
def get_active_panel(self):
"""Return the active panel, or None."""
panels = self.get_children_by_type(UnityPanel, active=True)
assert(len(panels) == 1)
return panels[0]
def get_active_indicator(self):
for panel in self.get_panels:
active = panel.get_active_indicator()
if active:
return active
return None
@property
def get_panels(self):
"""Return the available panels, or None."""
return self.get_children_by_type(UnityPanel)
class UnityPanel(UnityIntrospectionObject, KeybindingsHelper):
"""An individual panel for a monitor."""
def __init__(self, *args, **kwargs):
super(UnityPanel, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def __get_menu_view(self):
"""Return the menu view."""
menus = self.get_children_by_type(MenuView)
assert(len(menus) == 1)
return menus[0]
def __get_window_buttons(self):
"""Return the window buttons view."""
buttons = self.menus.get_children_by_type(WindowButtons)
assert(len(buttons) == 1)
return buttons[0]
def __get_grab_area(self):
"""Return the panel grab area."""
grab_areas = self.menus.get_children_by_type(GrabArea)
assert(len(grab_areas) == 1)
return grab_areas[0]
def __get_indicators_view(self):
"""Return the menu view."""
indicators = self.get_children_by_type(Indicators)
assert(len(indicators) == 1)
return indicators[0]
def move_mouse_below_the_panel(self):
"""Places the mouse to bottom of this panel."""
(x, y, w, h) = self.geometry
target_x = x + w / 2
target_y = y + h + 10
logger.debug("Moving mouse away from panel.")
self._mouse.move(target_x, target_y)
def move_mouse_over_menus(self):
"""Move the mouse over the menu area for this panel."""
(x, y, w, h) = self.menus.geometry
target_x = x + w / 2
target_y = y + h / 2
# The menu view has bigger geometry than the real layout
menu_entries = self.menus.get_entries()
if len(menu_entries) > 0:
first_x = menu_entries[0].x
last_x = menu_entries[-1].x + menu_entries[-1].width / 2
target_x = first_x + (last_x - first_x) / 2
logger.debug("Moving mouse to center of menu area.")
self._mouse.move(target_x, target_y)
def move_mouse_over_grab_area(self):
"""Move the mouse over the grab area for this panel."""
(x, y, w, h) = self.grab_area.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of grab area.")
self._mouse.move(target_x, target_y)
def move_mouse_over_window_buttons(self):
"""Move the mouse over the center of the window buttons area for this panel."""
(x, y, w, h) = self.window_buttons.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of the window buttons.")
self._mouse.move(target_x, target_y)
def move_mouse_over_indicators(self):
"""Move the mouse over the center of the indicators area for this panel."""
(x, y, w, h) = self.indicators.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of the indicators area.")
self._mouse.move(target_x, target_y)
def get_indicator_entries(self, visible_only=True, include_hidden_menus=False):
"""Returns a list of entries for this panel including both menus and indicators"""
entries = []
if include_hidden_menus or self.menus_shown:
entries = self.menus.get_entries()
entries += self.indicators.get_ordered_entries(visible_only)
return entries
def get_active_indicator(self):
"""Returns the indicator entry that is currently active"""
entries = self.get_indicator_entries(False, True)
entries = filter(lambda e: e.active == True, entries)
assert(len(entries) <= 1)
return entries[0] if entries else None
def get_indicator_entry(self, entry_id):
"""Returns the indicator entry for the given ID or None"""
entries = self.get_indicator_entries(False, True)
entries = filter(lambda e: e.entry_id == entry_id, entries)
assert(len(entries) <= 1)
return entries[0] if entries else None
@property
def title(self):
return self.menus.panel_title
@property
def desktop_is_active(self):
return self.menus.desktop_active
@property
def menus_shown(self):
return self.active and self.menus.draw_menus
@property
def window_buttons_shown(self):
return self.menus.draw_window_buttons
@property
def window_buttons(self):
return self.__get_window_buttons()
@property
def menus(self):
return self.__get_menu_view()
@property
def grab_area(self):
return self.__get_grab_area()
@property
def indicators(self):
return self.__get_indicators_view()
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current panel."""
return (self.x, self.y, self.width, self.height)
class MenuView(UnityIntrospectionObject):
"""The Menu View class."""
def get_entries(self):
"""Return a list of menu entries"""
entries = self.get_children_by_type(IndicatorEntry)
# We need to filter out empty entries, which are seperators - those
# are not valid, visible and working entries
# For instance, gedit adds some of those, breaking our tests
entries = [e for e in entries if (e.label != "")]
return entries
def get_menu_by_label(self, entry_label):
"""Return the first indicator entry found with the given label"""
indicators = self.get_children_by_type(IndicatorEntry, label=entry_label)
return indicators[0] if indicators else None
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current menu view."""
return (self.x, self.y, self.width, self.height)
class WindowButtons(UnityIntrospectionObject):
"""The window buttons class"""
def get_buttons(self, visible_only=True):
"""Return a list of window buttons"""
if visible_only:
return self.get_children_by_type(WindowButton, visible=True)
else:
return self.get_children_by_type(WindowButton)
def get_button(self, type):
buttons = self.get_children_by_type(WindowButton, type=type)
assert(len(buttons) == 1)
return buttons[0]
@property
def visible(self):
return len(self.get_buttons()) != 0
@property
def close(self):
return self.get_button("Close")
@property
def minimize(self):
return self.get_button("Minimize")
@property
def unmaximize(self):
return self.get_button("Unmaximize")
@property
def maximize(self):
return self.get_button("Maximize")
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current panel."""
return (self.x, self.y, self.width, self.height)
class WindowButton(UnityIntrospectionObject):
"""The Window WindowButton class."""
def __init__(self, *args, **kwargs):
super(WindowButton, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def mouse_move_to(self):
target_x = self.x + self.width / 2
target_y = self.y + self.height / 2
self._mouse.move(target_x, target_y, rate=20, time_between_events=0.005)
def mouse_click(self):
self.mouse_move_to()
sleep(.2)
self._mouse.click(press_duration=.1)
sleep(.01)
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the window button."""
return (self.x, self.y, self.width, self.height)
class GrabArea(UnityIntrospectionObject):
"""The grab area class"""
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the grab area."""
return (self.x, self.y, self.width, self.height)
class Indicators(UnityIntrospectionObject):
"""The Indicators View class."""
def get_ordered_entries(self, visible_only=True):
"""Return a list of indicators, ordered by their priority"""
if visible_only:
entries = self.get_children_by_type(IndicatorEntry, visible=True)
else:
entries = self.get_children_by_type(IndicatorEntry)
return sorted(entries, key=lambda entry: entry.priority)
def get_indicator_by_name_hint(self, name_hint):
"""Return the IndicatorEntry with the name_hint"""
indicators = self.get_children_by_type(IndicatorEntry, name_hint=name_hint)
assert(len(indicators) == 1)
return indicators[0]
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the indicators area."""
return (self.x, self.y, self.width, self.height)
class IndicatorEntry(UnityIntrospectionObject):
"""The IndicatorEntry View class."""
def __init__(self, *args, **kwargs):
super(IndicatorEntry, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def mouse_move_to(self):
target_x = self.x + self.width / 2
target_y = self.y + self.height / 2
self._mouse.move(target_x, target_y, rate=20, time_between_events=0.005)
def mouse_click(self, button=1):
self.mouse_move_to()
sleep(.2)
assert(self.visible)
self._mouse.click(press_duration=.1)
sleep(.01)
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the indicator entry."""
return (self.x, self.y, self.width, self.height)
@property
def menu_geometry(self):
"""Returns a tuple of (x,y,w,h) for the opened menu geometry."""
return (self.menu_x, self.menu_y, self.menu_width, self.menu_height)
def __repr__(self):
with self.no_automatic_refreshing():
return "<IndicatorEntry 0x%x (%s)>" % (id(self), self.label)
class Tray(UnityIntrospectionObject):
"""A panel tray object."""
| gpl-3.0 | -4,977,705,003,350,863,000 | 31.193182 | 90 | 0.618602 | false |
ComputerArchitectureGroupPWr/Floorplan-Maker | src/device.py | 1 | 7566 | import xml.dom.minidom as dom
from math import *
class Element:
x = 0
y = 0
def __eq__(self,other):
if(isinstance(other,Element)):
if( self.x == other.x and self.y == other.y):
return True
else:
return False
return NotImplemented
def __ne__(self,other):
if(isinstance(other,Element)):
if(self.x != other.x or self.y != other.y):
return True
else:
return False
return NotImplemented
def __init__(self, xpos, ypos):
self.x = xpos
self.y = ypos
class Device:
__columns = 0
__rows = 0
__occupiedSpace = list()
__freeSpace = list()
__firstUnit = 0
def __init__(self):
pass
def getColumns(self):
return self.__columns
def getRows(self):
return self.__rows
def getOccupiedSpace(self):
return self.__occupiedSpace
def getFreeSpace(self):
return self.__freeSpace
def setFreeSpaceFromFile(self, xmlDocument):
self.setOccupiedSpaceFromFile(xmlDocument)
occ = self.getOccupiedSpace()
oldY = occ[0].y
freeList = list()
for element in occ:
diff = element.y - oldY
if(diff > 1):
for i in range(1,diff):
newElement = Element(element.x, oldY + i)
freeList.append(newElement)
oldY = element.y
sortedFreeList = sorted(freeList, key= lambda obj: (obj.x, obj.y))
self.__freeSpace = sortedFreeList
def setDeviceSizeFromFile(self,xmlDocument):
size = xmlDocument.getElementsByTagName("size")
size = size[0]
self.__columns = size.getAttribute("cols")
self.__rows = size.getAttribute("rows")
def setOccupiedSpaceFromFile(self,xmlDocument):
obstacles = xmlDocument.getElementsByTagName("obstacle")
units = xmlDocument.getElementsByTagName("unit")
self.getFirstUnitOccurence(units)
occupied = obstacles + units
occ = list()
for element in occupied:
x = element.getAttribute("x")
y = element.getAttribute("y")
newElement = Element(int(x),int(y))
occ.append(newElement)
sortedOccupied = sorted(occ, key= lambda obj: (obj.x, obj.y))
self.__occupiedSpace = sortedOccupied
def generateLinearThermometers(self,xmlOutputDocument, thermNumber):
root = xmlOutputDocument.getElementsByTagName("board")
root = root[0]
oldY = 0
thermID = 0
occList = self.getOccupiedSpace()
for occ in occList:
col = occ.x
row = occ.y
diff = row - oldY
if(diff > 1):
for i in range(1,diff):
newTherm = xmlOutputDocument.createElement("thermometer")
newTherm.setAttribute("name", "t{}".format(thermID))
newTherm.setAttribute("type", "RO7")
newTherm.setAttribute("col", str(col))
newTherm.setAttribute("row", str(oldY + i))
root.appendChild(newTherm)
thermID = thermID + 1
if(thermID > int(thermNumber) - 1):
return xmlOutputDocument
oldY = row
return xmlOutputDocument
def getFreeRowLenList(self,freeList):
rowsLen = list()
freeList = self.getFreeSpace()
oldRowLen = freeList[0].x
#make a list of rows length
for element in freeList:
diff = element.x - oldRowLen
if(diff < 0):
rowsLen.append(int(oldRowLen + 1))
elif(freeList[-1] is element):
rowsLen.append(int(element.x + 1))
oldRowLen = element.x
return rowsLen
def getFirstUnitOccurence(self,units):
unitsList = list()
for unit in units:
x = unit.getAttribute("x")
y = unit.getAttribute("y")
newElement = Element(int(x),int(y))
unitsList.append(newElement)
firstElement = unitsList[1]
self.__firstUnit = firstElement
print("First Unit x: {} y: {}".format(firstElement.x,firstElement.y))
def getFreeColumnLenList(self,freeList):
colsLen = list()
oldColLen = freeList[0].y
for element in freeList:
diff = element.y - oldColLen
if(diff < 0):
colsLen.append(int(oldColLen + 1))
            elif(freeList[-1] is element):
                colsLen.append(int(element.y + 1))
            oldColLen = element.y
        return colsLen
def getFreeRowLen(self,sortedFreeList):
maximum = -1
l = 0
listLen = len(sortedFreeList)
colLen = self.getFreeColLen(sortedFreeList)
for i in range(0,listLen,colLen):
            if(sortedFreeList[i].x > maximum):
maximum = sortedFreeList[i].x
l = l + 1
else:
break
return l
def getFreeColLen(self,sortedFreeList):
maximum = -1
l = 0
for i in sortedFreeList:
if(i.y > maximum):
maximum = i.y
l = l + 1
else:
break
return l
def getFreeSingleRow(self,freeList,index):
singleColumnList = list()
for item in freeList:
if(item.y == index):
singleColumnList.append(item.x)
return singleColumnList
def getFreeSingleColumn(self, freeList, index):
singleRowList = list()
for item in freeList:
if(item.x == index):
singleRowList.append(item.y)
elif(item.x > index):
break
return singleRowList
def generateCoords(self, coordList, termNumber):
coordLen = len(coordList)
posList = list()
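        # descriptive note (added): try increasing strides i through coordList;
        # for each stride keep at most termNumber coordinates and remember the
        # selection only when it uses exactly termNumber of them, so the
        # returned positions are spread as widely as the grid allows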
for i in range(1,coordLen):
termsLeft = termNumber
newList = list()
for item in range(0,coordLen,i):
newList.append(coordList[item])
termsLeft = termsLeft - 1
if(termsLeft < 0 or termsLeft == 0):
break
if(termsLeft == 0):
posList = newList
return posList
def generateThermometersNet(self, xmlOutDocument,thermsInRow, rowsNumber):
xmlList = xmlOutDocument.getElementsByTagName("board")
root = xmlList[0]
freeList = self.getFreeSpace()
row = self.getFreeSingleRow(freeList,6)
column = self.getFreeSingleColumn(freeList,38)
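        # note (added): row index 6 and column index 38 are fixed anchors kept
        # from the original code; they select one free row and one free column
        # of the device from which the thermometer grid coordinates are derived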
colsCoords = self.generateCoords(row,int(thermsInRow))
rowsCoords = self.generateCoords(column, int(rowsNumber))
thermID = 0
for row in rowsCoords:
for col in colsCoords:
newElement = xmlOutDocument.createElement("thermometer")
newElement.setAttribute("type","RO7")
newElement.setAttribute("name","T{}".format(str(thermID)))
thermID = thermID + 1
newElement.setAttribute("col",str(col))
newElement.setAttribute("row",str(row))
root.appendChild(newElement)
return xmlOutDocument
def generateXmlHeader(self, xmlOutputDocument, ncdFile):
root = xmlOutputDocument.createElement("board")
root.setAttribute("device", "Virtex5")
root.setAttribute("mode", "emulation")
root.setAttribute("version", "0.1")
xmlOutputDocument.appendChild(root)
inputComponent = xmlOutputDocument.createElement("input")
outputComponent = xmlOutputDocument.createElement("output")
inputComponent.setAttribute("name", str(ncdFile))
ncdName = str(ncdFile).rsplit(".")
ncdName = ncdName[0]
outputComponent.setAttribute("name", "{}_new.ncd".format(ncdName))
root.appendChild(inputComponent)
root.appendChild(outputComponent)
return xmlOutputDocument
| mit | 7,512,026,252,719,605,000 | 23.888158 | 85 | 0.597145 | false |
bitmazk/django-account-keeping | account_keeping/south_migrations/0004_init_invoice_value_fields.py | 1 | 7094 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
from account_keeping.models import Invoice
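        # re-saving every invoice lets the model's save() logic populate the
        # newly added value fields (descriptive note; behaviour assumed from
        # the migration name 'init_invoice_value_fields')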
for invoice in Invoice.objects.all():
invoice.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'account_keeping.account': {
'Meta': {'object_name': 'Account'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accounts'", 'to': u"orm['account_keeping.Currency']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'total_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'})
},
u'account_keeping.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'account_keeping.currency': {
'Meta': {'object_name': 'Currency'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_base_currency': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'account_keeping.currencyrate': {
'Meta': {'object_name': 'CurrencyRate'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account_keeping.Currency']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month': ('django.db.models.fields.PositiveIntegerField', [], {}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '8'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'account_keeping.invoice': {
'Meta': {'ordering': "['invoice_date']", 'object_name': 'Invoice'},
'amount_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'amount_net': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'to': u"orm['account_keeping.Currency']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_date': ('django.db.models.fields.DateField', [], {}),
'invoice_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'invoice_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'payment_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'pdf': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'value_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'}),
'value_net': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'}),
'vat': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '4', 'decimal_places': '2'})
},
u'account_keeping.payee': {
'Meta': {'ordering': "['name']", 'object_name': 'Payee'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'account_keeping.transaction': {
'Meta': {'ordering': "['transaction_date']", 'object_name': 'Transaction'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transactions'", 'to': u"orm['account_keeping.Account']"}),
'amount_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'amount_net': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transactions'", 'to': u"orm['account_keeping.Category']"}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transactions'", 'to': u"orm['account_keeping.Currency']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'transactions'", 'null': 'True', 'to': u"orm['account_keeping.Invoice']"}),
'invoice_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['account_keeping.Transaction']"}),
'payee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transactions'", 'to': u"orm['account_keeping.Payee']"}),
'transaction_date': ('django.db.models.fields.DateField', [], {}),
'transaction_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'value_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'}),
'value_net': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'}),
'vat': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '4', 'decimal_places': '2'})
}
}
complete_apps = ['account_keeping']
symmetrical = True
| mit | -6,244,437,925,822,441,000 | 74.468085 | 184 | 0.564562 | false |
simbtrix/screenmix | screenmix/shapes/shapeRectangle.py | 1 | 5618 | '''
Created on 25.07.2016
@author: mkennert
'''
from decimal import Decimal
from kivy.properties import ListProperty, NumericProperty, ObjectProperty
from kivy.uix.gridlayout import GridLayout
from crossSectionInformation.rectInformation import RectangleInformation
from crossSectionView.rectView import RectView
from materialEditor.materiallist import MaterialList
from materials.concrete import Concrete
from shapes.ishape import IShape
class ShapeRectangle(GridLayout, IShape):
'''
represents a cross section which has the shape
of a rectangle
'''
# important components
view = ObjectProperty()
# information- rectangle
information = ObjectProperty()
# ack-rectangle
ack = ObjectProperty()
# layer of the cross-section-shape
layers = ListProperty([])
# height of the rectangle
h = NumericProperty(0.5)
# width of the rectangle
w = NumericProperty(0.25)
# area of the rectangle
size = NumericProperty(0.25 * 0.5)
# weight of the rectangle
weight = NumericProperty()
# price of the rectangle
price = NumericProperty()
# cracking-stress of the rectangle
strength = NumericProperty()
# constructor
def __init__(self, **kwargs):
super(ShapeRectangle, self).__init__(**kwargs)
self.cols = 2
self.allMaterials = MaterialList.Instance()
concrete = Concrete()
self.concreteDensity, self.concretePrice = concrete.density, concrete.price
self.concreteStiffness, self.concreteStrength = concrete.stiffness, concrete.strength
self.concreteStrain = self.concreteStrength / self.concreteStiffness
self.information, self.view = RectangleInformation(), RectView()
'''
update the concrete-properties
'''
def update_concrete_information(self, density, price, stiffness, strength):
self.concreteDensity, self.concretePrice = density, price
self.concreteStiffness, self.concreteStrength = stiffness, strength
self.concreteStrain = self.concreteStrength / self.concreteStiffness
'''
the method update_height changes the height of the view
'''
def update_height(self, value):
self.view.update_height(value)
self.h = value
self.size = self.h * self.w
for layer in self.layers:
if layer.focus:
self.refEdit.areaInput.text = '%.2E' % Decimal(str(self.size * layer.p))
'''
    the method update_width changes the width of the view
'''
def update_width(self, value):
self.view.update_width(value)
self.w = value
self.size = self.h * self.w
for layer in self.layers:
if layer.focus:
self.refEdit.areaInput.text = '%.2E' % Decimal(str(self.size * layer.p))
'''
    calculate the weight and the price of the cross section
'''
def calculate_weight_price(self):
weight = price = 0.
freeplaces = self.view.get_free_places()
# go trough all layers and
# get the weight of them
for layer in self.layers:
cur = layer.get_weight()
weight += cur
price += cur * layer.material.price
# get the free places, where the material is concrete
for layer in freeplaces:
w = (layer[1] - layer[0]) * self.w * self.concreteDensity
weight += w
price += w * self.concretePrice
self.weight, self.price = weight, price
'''
    the method calculate_strength calculates the strength of
    the cross-section
'''
def calculate_strength(self):
strength = 0.
# cur supremum
self.minOfMaxstrain = 1e6
# max strain is necessary for other calculations
self.maxOfMaxstrain = 0
# find the minimum max_strain and the maximum max_strain
for layer in self.layers:
curStrain = layer.strain
# proof whether the curStrain is smaller as the min
if curStrain < self.minOfMaxstrain:
self.minOfMaxstrain = curStrain
# proof whether the curStrain is bigger as the max
if curStrain > self.maxOfMaxstrain:
self.maxOfMaxstrain = curStrain
# if the percentOfLayers is not 1 there is a matrix
# with concrete as material
freePlaces = self.view.get_free_places()
if len(freePlaces) > 0:
curValue = self.concreteStrength / self.concreteStiffness
if self.minOfMaxstrain > curValue:
self.minOfMaxstrain = curValue
if self.maxOfMaxstrain < curValue:
self.maxOfMaxstrain = curValue
# calculate the strength
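        # (added note) rule-of-mixtures style sum: each layer contributes the
        # limiting strain times its stiffness, weighted by its share of the
        # total cross-section height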
for layer in self.layers:
strength += self.minOfMaxstrain * \
layer.material.stiffness * layer.h / self.h
for layer in freePlaces:
strength += self.minOfMaxstrain * \
(layer[1] - layer[0]) / self.h * self.concreteStiffness
self.strength = strength
'''
set the editor
'''
def set_reinforcement_editor(self, editor):
self.refEdit = editor
self.information.cs = self
self.information.create_gui()
self.view.cs = self
self.calculate_weight_price()
self.calculate_strength()
self.update_cs_information()
| gpl-3.0 | -7,661,164,327,814,738,000 | 31.440476 | 93 | 0.606978 | false |
beeftornado/sentry | tests/sentry/lang/native/test_utils.py | 1 | 1580 | from __future__ import absolute_import
from sentry.lang.native.utils import get_sdk_from_event, is_minidump_event
def test_get_sdk_from_event():
sdk_info = get_sdk_from_event(
{
"debug_meta": {
"sdk_info": {
"sdk_name": "iOS",
"version_major": 9,
"version_minor": 3,
"version_patchlevel": 0,
}
}
}
)
assert sdk_info["sdk_name"] == "iOS"
assert sdk_info["version_major"] == 9
assert sdk_info["version_minor"] == 3
assert sdk_info["version_patchlevel"] == 0
sdk_info = get_sdk_from_event(
{"contexts": {"os": {"type": "os", "name": "iOS", "version": "9.3.1.1234"}}}
)
assert sdk_info["sdk_name"] == "iOS"
assert sdk_info["version_major"] == 9
assert sdk_info["version_minor"] == 3
assert sdk_info["version_patchlevel"] == 1
def test_is_minidump():
assert is_minidump_event({"exception": {"values": [{"mechanism": {"type": "minidump"}}]}})
assert not is_minidump_event({"exception": {"values": [{"mechanism": {"type": "other"}}]}})
assert not is_minidump_event({"exception": {"values": [{"mechanism": {"type": None}}]}})
assert not is_minidump_event({"exception": {"values": [{"mechanism": None}]}})
assert not is_minidump_event({"exception": {"values": [None]}})
assert not is_minidump_event({"exception": {"values": []}})
assert not is_minidump_event({"exception": {"values": None}})
assert not is_minidump_event({"exception": None})
| bsd-3-clause | 1,585,955,638,993,456,400 | 36.619048 | 95 | 0.555063 | false |
nicoboss/Floatmotion | pygame/tests/pixelcopy_test.py | 1 | 25821 | if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest
else:
from test.test_utils import test_not_implemented, unittest
import pygame
from pygame.locals import *
from pygame.pixelcopy import surface_to_array, map_array
def unsigned32(i):
"""cast signed 32 bit integer to an unsigned integer"""
return i & 0xFFFFFFFF
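# e.g. unsigned32(-1) == 0xFFFFFFFF; handy when a mapped 32 bit pixel value is
# reported as a signed integer (illustrative note, added)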
class PixelcopyModuleTest (unittest.TestCase):
bitsizes = [8, 16, 32]
test_palette = [(0, 0, 0, 255),
(10, 30, 60, 255),
(25, 75, 100, 255),
(100, 150, 200, 255),
(0, 100, 200, 255)]
surf_size = (10, 12)
test_points = [((0, 0), 1), ((4, 5), 1), ((9, 0), 2),
((5, 5), 2), ((0, 11), 3), ((4, 6), 3),
((9, 11), 4), ((5, 6), 4)]
def __init__(self, *args, **kwds):
pygame.display.init()
try:
unittest.TestCase.__init__(self, *args, **kwds)
self.sources = [self._make_src_surface(8),
self._make_src_surface(16),
self._make_src_surface(16, srcalpha=True),
self._make_src_surface(24),
self._make_src_surface(32),
self._make_src_surface(32, srcalpha=True)]
finally:
pygame.display.quit()
def _make_surface(self, bitsize, srcalpha=False, palette=None):
if palette is None:
palette = self.test_palette
flags = 0
if srcalpha:
flags |= SRCALPHA
surf = pygame.Surface(self.surf_size, flags, bitsize)
if bitsize == 8:
surf.set_palette([c[:3] for c in palette])
return surf
def _fill_surface(self, surf, palette=None):
if palette is None:
palette = self.test_palette
surf.fill(palette[1], (0, 0, 5, 6))
surf.fill(palette[2], (5, 0, 5, 6))
surf.fill(palette[3], (0, 6, 5, 6))
surf.fill(palette[4], (5, 6, 5, 6))
def _make_src_surface(self, bitsize, srcalpha=False, palette=None):
surf = self._make_surface(bitsize, srcalpha, palette)
self._fill_surface(surf, palette)
return surf
def setUp(self):
pygame.display.init()
def tearDown(self):
pygame.display.quit()
def test_surface_to_array_2d(self):
alpha_color = (0, 0, 0, 128)
for surf in self.sources:
src_bitsize = surf.get_bitsize()
for dst_bitsize in self.bitsizes:
dst = pygame.Surface(surf.get_size(), 0, dst_bitsize)
dst.fill((0, 0, 0, 0))
view = dst.get_view('2')
self.assertFalse(surf.get_locked())
if dst_bitsize < src_bitsize:
self.assertRaises(ValueError, surface_to_array, view, surf)
self.assertFalse(surf.get_locked())
continue
surface_to_array(view, surf)
self.assertFalse(surf.get_locked())
for posn, i in self.test_points:
sp = surf.get_at_mapped(posn)
dp = dst.get_at_mapped(posn)
self.assertEqual(dp, sp,
"%s != %s: flags: %i"
", bpp: %i, posn: %s" %
(dp, sp,
surf.get_flags(), surf.get_bitsize(),
posn))
del view
if surf.get_masks()[3]:
dst.fill((0, 0, 0, 0))
view = dst.get_view('2')
posn = (2, 1)
surf.set_at(posn, alpha_color)
self.assertFalse(surf.get_locked())
surface_to_array(view, surf)
self.assertFalse(surf.get_locked())
sp = surf.get_at_mapped(posn)
dp = dst.get_at_mapped(posn)
self.assertEqual(dp, sp,
"%s != %s: bpp: %i" %
(dp, sp, surf.get_bitsize()))
def test_surface_to_array_3d(self):
if pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN:
masks = (0xff, 0xff00, 0xff0000, 0)
else:
masks = (0xff000000, 0xff0000, 0xff00, 0)
dst = pygame.Surface(self.surf_size, 0, 24, masks=masks)
for surf in self.sources:
dst.fill((0, 0, 0, 0))
src_bitsize = surf.get_bitsize()
view = dst.get_view('3')
self.assertFalse(surf.get_locked())
surface_to_array(view, surf)
self.assertFalse(surf.get_locked())
for posn, i in self.test_points:
sc = surf.get_at(posn)[0:3]
dc = dst.get_at(posn)[0:3]
self.assertEqual(dc, sc,
"%s != %s: flags: %i"
", bpp: %i, posn: %s" %
(dc, sc,
surf.get_flags(), surf.get_bitsize(),
posn))
del view
def test_map_array(self):
targets = [self._make_surface(8),
self._make_surface(16),
self._make_surface(16, srcalpha=True),
self._make_surface(24),
self._make_surface(32),
self._make_surface(32, srcalpha=True),
]
source = pygame.Surface(self.surf_size, 0, 24,
masks=[0xff, 0xff00, 0xff0000, 0])
source_view = source.get_view('3') # (w, h, 3)
for t in targets:
map_array(t.get_view('2'), source_view, t)
for posn, i in self.test_points:
sc = t.map_rgb(source.get_at(posn))
dc = t.get_at_mapped(posn)
self.assertEqual(dc, sc,
"%s != %s: flags: %i"
", bpp: %i, posn: %s" %
(dc, sc,
t.get_flags(), t.get_bitsize(),
posn))
color = pygame.Color("salmon")
color.set_length(3)
for t in targets:
map_array(t.get_view('2'), color, t)
sc = t.map_rgb(color)
for posn, i in self.test_points:
dc = t.get_at_mapped(posn)
self.assertEqual(dc, sc,
"%s != %s: flags: %i"
", bpp: %i, posn: %s" %
(dc, sc,
t.get_flags(), t.get_bitsize(),
posn))
# mismatched shapes
w, h = source.get_size()
target = pygame.Surface((w, h + 1), 0, 32)
self.assertRaises(ValueError, map_array, target, source, target)
target = pygame.Surface((w - 1, h), 0, 32)
self.assertRaises(ValueError, map_array, target, source, target)
def test_array_to_surface_broadcasting(self):
# target surfaces
targets = [self._make_surface(8),
self._make_surface(16),
self._make_surface(16, srcalpha=True),
self._make_surface(24),
self._make_surface(32),
self._make_surface(32, srcalpha=True),
]
w, h = self.surf_size
# broadcast column
column = pygame.Surface((1, h), 0, 32)
for target in targets:
source = pygame.Surface((1, h), 0, target)
for y in range(h):
source.set_at((0, y),
pygame.Color(y + 1, y + h + 1, y + 2 * h + 1))
pygame.pixelcopy.surface_to_array(column.get_view('2'), source)
pygame.pixelcopy.array_to_surface(target, column.get_view('2'))
for x in range(w):
for y in range(h):
self.assertEqual(target.get_at_mapped((x, y)),
column.get_at_mapped((0, y)))
# broadcast row
row = pygame.Surface((w, 1), 0, 32)
for target in targets:
source = pygame.Surface((w, 1), 0, target)
for x in range(w):
source.set_at((x, 0),
pygame.Color(x + 1, x + w + 1, x + 2 * w + 1))
pygame.pixelcopy.surface_to_array(row.get_view('2'), source)
pygame.pixelcopy.array_to_surface(target, row.get_view('2'))
for x in range(w):
for y in range(h):
self.assertEqual(target.get_at_mapped((x, y)),
row.get_at_mapped((x, 0)))
# broadcast pixel
pixel = pygame.Surface((1, 1), 0, 32)
for target in targets:
source = pygame.Surface((1, 1), 0, target)
source.set_at((0, 0), pygame.Color(13, 47, 101))
pygame.pixelcopy.surface_to_array(pixel.get_view('2'), source)
pygame.pixelcopy.array_to_surface(target, pixel.get_view('2'))
p = pixel.get_at_mapped((0, 0))
for x in range(w):
for y in range(h):
self.assertEqual(target.get_at_mapped((x, y)), p)
def todo_test_array_to_surface(self):
# target surfaces
        targets = [self._make_surface(8),
                   self._make_surface(16),
                   self._make_surface(16, srcalpha=True),
                   self._make_surface(24),
                   self._make_surface(32),
                   self._make_surface(32, srcalpha=True),
]
# source arrays
arrays3d = []
dtypes = [(8, uint8), (16, uint16), (32, uint32)]
try:
dtypes.append((64, uint64))
except NameError:
pass
arrays3d = [(self._make_src_array3d(dtype), None)
for __, dtype in dtypes]
for bitsize in [8, 16, 24, 32]:
palette = None
if bitsize == 16:
s = pygame.Surface((1,1), 0, 16)
palette = [s.unmap_rgb(s.map_rgb(c))
for c in self.test_palette]
if self.pixels3d[bitsize]:
surf = self._make_src_surface(bitsize)
arr = pygame.surfarray.pixels3d(surf)
arrays3d.append((arr, palette))
if self.array3d[bitsize]:
surf = self._make_src_surface(bitsize)
arr = pygame.surfarray.array3d(surf)
arrays3d.append((arr, palette))
for sz, dtype in dtypes:
arrays3d.append((arr.astype(dtype), palette))
# tests on arrays
def do_blit(surf, arr):
pygame.surfarray.blit_array(surf, arr)
for surf in targets:
bitsize = surf.get_bitsize()
for arr, palette in arrays3d:
surf.fill((0, 0, 0, 0))
if bitsize == 8:
self.failUnlessRaises(ValueError, do_blit, surf, arr)
else:
pygame.surfarray.blit_array(surf, arr)
self._assert_surface(surf, palette)
if self.pixels2d[bitsize]:
surf.fill((0, 0, 0, 0))
s = self._make_src_surface(bitsize, surf.get_flags() & SRCALPHA)
arr = pygame.surfarray.pixels2d(s)
pygame.surfarray.blit_array(surf, arr)
self._assert_surface(surf)
if self.array2d[bitsize]:
s = self._make_src_surface(bitsize, surf.get_flags() & SRCALPHA)
arr = pygame.surfarray.array2d(s)
for sz, dtype in dtypes:
surf.fill((0, 0, 0, 0))
if sz >= bitsize:
pygame.surfarray.blit_array(surf, arr.astype(dtype))
self._assert_surface(surf)
else:
self.failUnlessRaises(ValueError, do_blit,
surf, self._make_array2d(dtype))
# Check alpha for 2D arrays
surf = self._make_surface(16, srcalpha=True)
arr = zeros(surf.get_size(), uint16)
arr[...] = surf.map_rgb((0, 128, 255, 64))
color = surf.unmap_rgb(arr[0, 0])
pygame.surfarray.blit_array(surf, arr)
self.assertEqual(surf.get_at((5, 5)), color)
surf = self._make_surface(32, srcalpha=True)
arr = zeros(surf.get_size(), uint32)
color = (0, 111, 255, 63)
arr[...] = surf.map_rgb(color)
pygame.surfarray.blit_array(surf, arr)
self.assertEqual(surf.get_at((5, 5)), color)
# Check shifts
arr3d = self._make_src_array3d(uint8)
shift_tests = [(16,
[12, 0, 8, 4],
[0xf000, 0xf, 0xf00, 0xf0]),
(24,
[16, 0, 8, 0],
[0xff0000, 0xff, 0xff00, 0]),
(32,
[0, 16, 24, 8],
[0xff, 0xff0000, 0xff000000, 0xff00])]
for bitsize, shifts, masks in shift_tests:
surf = self._make_surface(bitsize, srcalpha=(shifts[3] != 0))
palette = None
if bitsize == 16:
palette = [surf.unmap_rgb(surf.map_rgb(c))
for c in self.test_palette]
surf.set_shifts(shifts)
surf.set_masks(masks)
pygame.surfarray.blit_array(surf, arr3d)
self._assert_surface(surf, palette)
# Invalid arrays
surf = pygame.Surface((1,1), 0, 32)
t = 'abcd'
self.failUnlessRaises(ValueError, do_blit, surf, t)
surf_size = self.surf_size
surf = pygame.Surface(surf_size, 0, 32)
arr = zeros([surf_size[0], surf_size[1] + 1, 3], uint32)
self.failUnlessRaises(ValueError, do_blit, surf, arr)
arr = zeros([surf_size[0] + 1, surf_size[1], 3], uint32)
self.failUnlessRaises(ValueError, do_blit, surf, arr)
surf = pygame.Surface((1, 4), 0, 32)
arr = zeros((4,), uint32)
self.failUnlessRaises(ValueError, do_blit, surf, arr)
arr.shape = (1, 1, 1, 4)
self.failUnlessRaises(ValueError, do_blit, surf, arr)
arr = zeros((10, 10), float64)
surf = pygame.Surface((10, 10), 0, 32)
self.failUnlessRaises(ValueError, do_blit, surf, arr)
class PixelCopyTestWithArray(unittest.TestCase):
try:
import numpy
except ImportError:
__tags__ = ['ignore', 'subprocess_ignore']
bitsizes = [8, 16, 32]
test_palette = [(0, 0, 0, 255),
(10, 30, 60, 255),
(25, 75, 100, 255),
(100, 150, 200, 255),
(0, 100, 200, 255)]
surf_size = (10, 12)
test_points = [((0, 0), 1), ((4, 5), 1), ((9, 0), 2),
((5, 5), 2), ((0, 11), 3), ((4, 6), 3),
((9, 11), 4), ((5, 6), 4)]
def __init__(self, *args, **kwds):
import numpy
self.dst_types = [numpy.uint8, numpy.uint16, numpy.uint32]
try:
self.dst_types.append(numpy.uint64)
except AttributeError:
pass
pygame.display.init()
try:
unittest.TestCase.__init__(self, *args, **kwds)
self.sources = [self._make_src_surface(8),
self._make_src_surface(16),
self._make_src_surface(16, srcalpha=True),
self._make_src_surface(24),
self._make_src_surface(32),
self._make_src_surface(32, srcalpha=True)]
finally:
pygame.display.quit()
def _make_surface(self, bitsize, srcalpha=False, palette=None):
if palette is None:
palette = self.test_palette
flags = 0
if srcalpha:
flags |= SRCALPHA
surf = pygame.Surface(self.surf_size, flags, bitsize)
if bitsize == 8:
surf.set_palette([c[:3] for c in palette])
return surf
def _fill_surface(self, surf, palette=None):
if palette is None:
palette = self.test_palette
surf.fill(palette[1], (0, 0, 5, 6))
surf.fill(palette[2], (5, 0, 5, 6))
surf.fill(palette[3], (0, 6, 5, 6))
surf.fill(palette[4], (5, 6, 5, 6))
def _make_src_surface(self, bitsize, srcalpha=False, palette=None):
surf = self._make_surface(bitsize, srcalpha, palette)
self._fill_surface(surf, palette)
return surf
def setUp(self):
pygame.display.init()
def tearDown(self):
pygame.display.quit()
def test_surface_to_array_2d(self):
try:
from numpy import empty, dtype
except ImportError:
return
palette = self.test_palette
alpha_color = (0, 0, 0, 128)
dst_dims = self.surf_size
destinations = [empty(dst_dims, t) for t in self.dst_types]
if (pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN):
swapped_dst = empty(dst_dims, dtype('>u4'))
else:
swapped_dst = empty(dst_dims, dtype('<u4'))
for surf in self.sources:
src_bytesize = surf.get_bytesize()
for dst in destinations:
if dst.itemsize < src_bytesize:
self.assertRaises(ValueError, surface_to_array, dst, surf)
continue
dst[...] = 0
self.assertFalse(surf.get_locked())
surface_to_array(dst, surf)
self.assertFalse(surf.get_locked())
for posn, i in self.test_points:
sp = unsigned32(surf.get_at_mapped(posn))
dp = dst[posn]
self.assertEqual(dp, sp,
"%s != %s: flags: %i"
", bpp: %i, dtype: %s, posn: %s" %
(dp, sp,
surf.get_flags(), surf.get_bitsize(),
dst.dtype,
posn))
if surf.get_masks()[3]:
posn = (2, 1)
surf.set_at(posn, alpha_color)
surface_to_array(dst, surf)
sp = unsigned32(surf.get_at_mapped(posn))
dp = dst[posn]
self.assertEqual(dp, sp, "%s != %s: bpp: %i" %
(dp, sp, surf.get_bitsize()))
swapped_dst[...] = 0
self.assertFalse(surf.get_locked())
surface_to_array(swapped_dst, surf)
self.assertFalse(surf.get_locked())
for posn, i in self.test_points:
sp = unsigned32(surf.get_at_mapped(posn))
dp = swapped_dst[posn]
self.assertEqual(dp, sp,
"%s != %s: flags: %i"
", bpp: %i, dtype: %s, posn: %s" %
(dp, sp,
surf.get_flags(), surf.get_bitsize(),
dst.dtype,
posn))
if surf.get_masks()[3]:
posn = (2, 1)
surf.set_at(posn, alpha_color)
self.assertFalse(surf.get_locked())
surface_to_array(swapped_dst, surf)
self.assertFalse(surf.get_locked())
sp = unsigned32(surf.get_at_mapped(posn))
dp = swapped_dst[posn]
self.assertEqual(dp, sp, "%s != %s: bpp: %i" %
(dp, sp, surf.get_bitsize()))
def test_surface_to_array_3d(self):
try:
from numpy import empty, dtype
except ImportError:
return
palette = self.test_palette
dst_dims = self.surf_size + (3,)
destinations = [empty(dst_dims, t) for t in self.dst_types]
if (pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN):
swapped_dst = empty(dst_dims, dtype('>u4'))
else:
swapped_dst = empty(dst_dims, dtype('<u4'))
for surf in self.sources:
src_bitsize = surf.get_bitsize()
for dst in destinations:
dst[...] = 0
self.assertFalse(surf.get_locked())
surface_to_array(dst, surf)
self.assertFalse(surf.get_locked())
for posn, i in self.test_points:
r_surf, g_surf, b_surf, a_surf = surf.get_at(posn)
r_arr, g_arr, b_arr = dst[posn]
self.assertEqual(r_arr, r_surf,
"%i != %i, color: red, flags: %i"
", bpp: %i, posn: %s" %
(r_arr, r_surf,
surf.get_flags(), surf.get_bitsize(),
posn))
self.assertEqual(g_arr, g_surf,
"%i != %i, color: green, flags: %i"
", bpp: %i, posn: %s" %
                                     (g_arr, g_surf,
surf.get_flags(), surf.get_bitsize(),
posn))
self.assertEqual(b_arr, b_surf,
"%i != %i, color: blue, flags: %i"
", bpp: %i, posn: %s" %
                                     (b_arr, b_surf,
surf.get_flags(), surf.get_bitsize(),
posn))
swapped_dst[...] = 0
self.assertFalse(surf.get_locked())
surface_to_array(swapped_dst, surf)
self.assertFalse(surf.get_locked())
for posn, i in self.test_points:
r_surf, g_surf, b_surf, a_surf = surf.get_at(posn)
r_arr, g_arr, b_arr = swapped_dst[posn]
self.assertEqual(r_arr, r_surf,
"%i != %i, color: red, flags: %i"
", bpp: %i, posn: %s" %
(r_arr, r_surf,
surf.get_flags(), surf.get_bitsize(),
posn))
self.assertEqual(g_arr, g_surf,
"%i != %i, color: green, flags: %i"
", bpp: %i, posn: %s" %
                                 (g_arr, g_surf,
surf.get_flags(), surf.get_bitsize(),
posn))
self.assertEqual(b_arr, b_surf,
"%i != %i, color: blue, flags: %i"
", bpp: %i, posn: %s" %
                                 (b_arr, b_surf,
surf.get_flags(), surf.get_bitsize(),
posn))
def test_map_array(self):
try:
from numpy import array, zeros, uint8, int32, alltrue
except ImportError:
return
surf = pygame.Surface((1, 1), 0, 32)
# color fill
color = array([11, 17, 59], uint8)
target = zeros((5, 7), int32)
map_array(target, color, surf)
self.assert_(alltrue(target == surf.map_rgb(color)))
# array column stripes
stripe = array([[2, 5, 7], [11, 19, 23], [37, 53, 101]], uint8)
target = zeros((4, stripe.shape[0]), int32)
map_array(target, stripe, surf)
target_stripe = array([surf.map_rgb(c) for c in stripe], int32)
self.assert_(alltrue(target == target_stripe))
# array row stripes
stripe = array([[[2, 5, 7]],
[[11, 19, 24]],
[[10, 20, 30]],
[[37, 53, 101]]], uint8)
target = zeros((stripe.shape[0], 3), int32)
map_array(target, stripe, surf)
target_stripe = array([[surf.map_rgb(c)] for c in stripe[:,0]], int32)
self.assert_(alltrue(target == target_stripe))
# mismatched shape
w = 4
h = 5
source = zeros((w, h, 3), uint8)
target = zeros((w,), int32)
self.assertRaises(ValueError, map_array, target, source, surf)
source = zeros((12, w, h + 1), uint8)
self.assertRaises(ValueError, map_array, target, source, surf)
source = zeros((12, w - 1, 5), uint8)
self.assertRaises(ValueError, map_array, target, source, surf)
try:
numpy
except NameError:
# Ensure no methods requiring numpy are run when
# pixelcopy_test is '__main__'.
del __init__
del test_surface_to_array_2d
del test_surface_to_array_3d
del test_map_array
else:
del numpy
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -6,294,435,277,018,590,000 | 39.09472 | 80 | 0.451261 | false |
YuxuanLing/trunk | trunk/code/study/python/core_python_appilication/ch04/mtsleepF.py | 1 | 1118 | #!/usr/bin/env python
from atexit import register
from random import randrange
from threading import Thread, Lock, currentThread
from time import sleep, ctime
class CleanOutputSet(set):
def __str__(self):
return ', '.join(x for x in self)
lock = Lock()
loops = (randrange(2, 5) for x in xrange(randrange(3, 7)))
remaining = CleanOutputSet()
def loop(nsec):
myname = currentThread().name
lock.acquire()
remaining.add(myname)
print '[%s] Started %s' % (ctime(), myname) #print '[{0}] Started {1}'.format(ctime(), myname)
lock.release()
sleep(nsec)
lock.acquire()
remaining.remove(myname)
print '[%s] Completed %s (%d secs)' % ( #print '[{0}] Completed {1} ({2} secs)'.format(
ctime(), myname, nsec)
print ' (remaining: %s)' % (remaining or 'NONE') #print ' (remaining: {0})'.format(remaining or 'NONE')
lock.release()
def _main():
for pause in loops:
Thread(target=loop, args=(pause,)).start()
@register
def _atexit():
print 'all DONE at:', ctime()
if __name__ == '__main__':
_main()
| gpl-3.0 | 8,837,571,305,694,491,000 | 26.666667 | 113 | 0.595707 | false |
kaizentech/skeleton | urls.py | 1 | 1052 | """temp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
url(r'^admin/', admin.site.urls),
]
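# An illustrative sketch of the docstring's steps, using a hypothetical app
# (the "myapp" names are placeholders, not part of this project):
#
#   from myapp import views as myapp_views
#   urlpatterns += [
#       url(r'^$', myapp_views.home, name='home'),
#   ]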
# Change to whatever you like
admin.site.site_title = '{{ project_name }} Administration'
admin.site.index_title = '{{ project_name }} Administration'
admin.site.site_header = '{{ project_name }} Administration'
| apache-2.0 | 4,995,287,408,869,362,000 | 37.962963 | 79 | 0.701521 | false |
habibmasuro/django-wiki | wiki/urls.py | 1 | 8204 | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url, include
from wiki.conf import settings
from wiki.core.plugins import registry
from wiki.views import article, accounts
from wiki.core.utils import get_class_from_str
from django.contrib.auth.views import password_reset
class WikiURLPatterns(object):
'''
    Configurator for the wiki URLs.
    To customize, define your own subclass and override either the view
    provider attributes or the functions that collect views.
'''
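    # A minimal customization sketch (hypothetical names, not part of this
    # module): subclass WikiURLPatterns and point a view provider at your own
    # class-based view,
    #
    #   class MyWikiURLPatterns(WikiURLPatterns):
    #       article_view_class = myapp.views.MyArticleView
    #
    # then expose the subclass through the URL_CONFIG_CLASS setting that
    # get_pattern() below reads, so article URLs resolve to the custom view.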
# basic views
article_view_class = article.ArticleView
article_create_view_class = article.Create
article_delete_view_class = article.Delete
article_deleted_view_class = article.Deleted
article_dir_view_class = article.Dir
article_edit_view_class = article.Edit
article_preview_view_class = article.Preview
article_history_view_class = article.History
article_settings_view_class = article.Settings
article_source_view_class = article.Source
article_plugin_view_class = article.Plugin
revision_change_view = article.ChangeRevisionView
revision_merge_view = 'wiki.views.article.merge'
search_view_class = settings.SEARCH_VIEW
article_diff_view = 'wiki.views.article.diff'
# account views
signup_view_class = accounts.Signup
login_view_class = accounts.Login
logout_view_class = accounts.Logout
def get_urls(self):
urlpatterns = self.get_root_urls()
urlpatterns += self.get_accounts_urls()
urlpatterns += self.get_revision_urls()
urlpatterns += self.get_article_urls()
urlpatterns += self.get_plugin_urls()
# This ALWAYS has to be the last of all the patterns since
# the paths in theory could wrongly match other targets.
urlpatterns += self.get_article_path_urls()
return urlpatterns
def get_root_urls(self):
urlpatterns = patterns('',
url('^$', self.article_view_class.as_view(), name='root', kwargs={'path': ''}),
url('^create-root/$', article.CreateRootView.as_view(), name='root_create'),
url('^missing-root/$', article.MissingRootView.as_view(), name='root_missing'),
url('^_search/$', get_class_from_str(self.search_view_class).as_view(), name='search'),
url('^_revision/diff/(?P<revision_id>\d+)/$', self.article_diff_view, name='diff'),
)
return urlpatterns
def get_accounts_urls(self):
urlpatterns = patterns('',
url('^_accounts/sign-up/$', self.signup_view_class.as_view(), name='signup'),
url('^_accounts/logout/$', self.logout_view_class.as_view(), name='logout'),
url('^_accounts/login/$', self.login_view_class.as_view(), name='login'),
url(r'^accounts/password/reset$', 'django.contrib.auth.views.password_reset', {'template_name': 'wiki/registration/password_reset_form.html'}),
)
return urlpatterns
def get_revision_urls(self):
urlpatterns = patterns('',
            # This one doesn't work because it doesn't know where to redirect to afterwards...
url('^_revision/change/(?P<article_id>\d+)/(?P<revision_id>\d+)/$', self.revision_change_view.as_view(), name='change_revision'),
url('^_revision/preview/(?P<article_id>\d+)/$', self.article_preview_view_class.as_view(), name='preview_revision'),
url('^_revision/merge/(?P<article_id>\d+)/(?P<revision_id>\d+)/preview/$', self.revision_merge_view, name='merge_revision_preview', kwargs={'preview': True}),
)
return urlpatterns
def get_article_urls(self):
urlpatterns = patterns('',
# Paths decided by article_ids
url('^(?P<article_id>\d+)/$', self.article_view_class.as_view(), name='get'),
url('^(?P<article_id>\d+)/delete/$', self.article_delete_view_class.as_view(), name='delete'),
url('^(?P<article_id>\d+)/deleted/$', self.article_deleted_view_class.as_view(), name='deleted'),
url('^(?P<article_id>\d+)/edit/$', self.article_edit_view_class.as_view(), name='edit'),
url('^(?P<article_id>\d+)/preview/$', self.article_preview_view_class.as_view(), name='preview'),
url('^(?P<article_id>\d+)/history/$', self.article_history_view_class.as_view(), name='history'),
url('^(?P<article_id>\d+)/settings/$', self.article_settings_view_class.as_view(), name='settings'),
url('^(?P<article_id>\d+)/source/$', self.article_source_view_class.as_view(), name='source'),
url('^(?P<article_id>\d+)/revision/change/(?P<revision_id>\d+)/$', self.revision_change_view.as_view(), name='change_revision'),
url('^(?P<article_id>\d+)/revision/merge/(?P<revision_id>\d+)/$', self.revision_merge_view, name='merge_revision'),
url('^(?P<article_id>\d+)/plugin/(?P<slug>\w+)/$', self.article_plugin_view_class.as_view(), name='plugin'),
)
return urlpatterns
def get_article_path_urls(self):
urlpatterns = patterns('',
# Paths decided by URLs
url('^(?P<path>.+/|)_create/$', self.article_create_view_class.as_view(), name='create'),
url('^(?P<path>.+/|)_delete/$', self.article_delete_view_class.as_view(), name='delete'),
url('^(?P<path>.+/|)_deleted/$', self.article_deleted_view_class.as_view(), name='deleted'),
url('^(?P<path>.+/|)_edit/$', self.article_edit_view_class.as_view(), name='edit'),
url('^(?P<path>.+/|)_preview/$', self.article_preview_view_class.as_view(), name='preview'),
url('^(?P<path>.+/|)_history/$', self.article_history_view_class.as_view(), name='history'),
url('^(?P<path>.+/|)_dir/$', self.article_dir_view_class.as_view(), name='dir'),
url('^(?P<path>.+/|)_settings/$', self.article_settings_view_class.as_view(), name='settings'),
url('^(?P<path>.+/|)_source/$', self.article_source_view_class.as_view(), name='source'),
url('^(?P<path>.+/|)_revision/change/(?P<revision_id>\d+)/$', self.revision_change_view.as_view(), name='change_revision'),
url('^(?P<path>.+/|)_revision/merge/(?P<revision_id>\d+)/$', self.revision_merge_view, name='merge_revision'),
url('^(?P<path>.+/|)_plugin/(?P<slug>\w+)/$', self.article_plugin_view_class.as_view(), name='plugin'),
# This should always go last!
url('^(?P<path>.+/|)$', self.article_view_class.as_view(), name='get'),
)
return urlpatterns
def get_plugin_urls(self):
urlpatterns = patterns('',)
for plugin in registry.get_plugins().values():
slug = getattr(plugin, 'slug', None)
if slug:
article_urlpatterns = plugin.urlpatterns.get('article', [])
urlpatterns += patterns('',
url('^(?P<article_id>\d+)/plugin/' + slug + '/', include(article_urlpatterns)),
url('^(?P<path>.+/|)_plugin/' + slug + '/', include(article_urlpatterns)),
)
root_urlpatterns = plugin.urlpatterns.get('root', [])
urlpatterns += patterns('',
url('^_plugin/' + slug + '/', include(root_urlpatterns)),
)
return urlpatterns
def get_pattern(app_name="wiki", namespace="wiki", url_config_class=None):
"""Every url resolution takes place as "wiki:view_name".
You should not attempt to have multiple deployments of the wiki in a
single Django project.
https://docs.djangoproject.com/en/dev/topics/http/urls/#topics-http-reversing-url-namespaces
"""
if url_config_class is None:
url_config_classname=getattr(settings, 'URL_CONFIG_CLASS', None)
if url_config_classname is None:
url_config_class = WikiURLPatterns
else:
url_config_class = get_class_from_str(url_config_classname)
urlpatterns = url_config_class().get_urls()
return urlpatterns, app_name, namespace
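# Usage sketch (hypothetical project urls.py, not part of this module): the
# (urlpatterns, app_name, namespace) tuple returned by get_pattern() can be
# handed to include(), so every wiki view is reversed as "wiki:view_name", e.g.
#
#   from django.conf.urls import include, patterns, url
#   from wiki.urls import get_pattern as get_wiki_pattern
#   urlpatterns = patterns('', url(r'^wiki/', include(get_wiki_pattern())))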
######################
# PLUGINS
######################
from wiki.core.plugins.loader import load_wiki_plugins
load_wiki_plugins()
| gpl-3.0 | 7,259,859,571,351,220,000 | 50.597484 | 175 | 0.60312 | false |
lampwins/netbox | netbox/dcim/urls.py | 1 | 26410 | from django.urls import path
from extras.views import ObjectChangeLogView, ImageAttachmentEditView
from ipam.views import ServiceCreateView
from secrets.views import secret_add
from . import views
from .models import (
Cable, ConsolePort, ConsoleServerPort, Device, DeviceRole, DeviceType, FrontPort, Interface, Manufacturer, Platform,
PowerFeed, PowerPanel, PowerPort, PowerOutlet, Rack, RackGroup, RackReservation, RackRole, RearPort, Region, Site,
VirtualChassis,
)
app_name = 'dcim'
urlpatterns = [
# Regions
path(r'regions/', views.RegionListView.as_view(), name='region_list'),
path(r'regions/add/', views.RegionCreateView.as_view(), name='region_add'),
path(r'regions/import/', views.RegionBulkImportView.as_view(), name='region_import'),
path(r'regions/delete/', views.RegionBulkDeleteView.as_view(), name='region_bulk_delete'),
path(r'regions/<int:pk>/edit/', views.RegionEditView.as_view(), name='region_edit'),
path(r'regions/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='region_changelog', kwargs={'model': Region}),
# Sites
path(r'sites/', views.SiteListView.as_view(), name='site_list'),
path(r'sites/add/', views.SiteCreateView.as_view(), name='site_add'),
path(r'sites/import/', views.SiteBulkImportView.as_view(), name='site_import'),
path(r'sites/edit/', views.SiteBulkEditView.as_view(), name='site_bulk_edit'),
path(r'sites/delete/', views.SiteBulkDeleteView.as_view(), name='site_bulk_delete'),
path(r'sites/<slug:slug>/', views.SiteView.as_view(), name='site'),
path(r'sites/<slug:slug>/edit/', views.SiteEditView.as_view(), name='site_edit'),
path(r'sites/<slug:slug>/delete/', views.SiteDeleteView.as_view(), name='site_delete'),
path(r'sites/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='site_changelog', kwargs={'model': Site}),
path(r'sites/<int:object_id>/images/add/', ImageAttachmentEditView.as_view(), name='site_add_image', kwargs={'model': Site}),
# Rack groups
path(r'rack-groups/', views.RackGroupListView.as_view(), name='rackgroup_list'),
path(r'rack-groups/add/', views.RackGroupCreateView.as_view(), name='rackgroup_add'),
path(r'rack-groups/import/', views.RackGroupBulkImportView.as_view(), name='rackgroup_import'),
path(r'rack-groups/delete/', views.RackGroupBulkDeleteView.as_view(), name='rackgroup_bulk_delete'),
path(r'rack-groups/<int:pk>/edit/', views.RackGroupEditView.as_view(), name='rackgroup_edit'),
path(r'rack-groups/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rackgroup_changelog', kwargs={'model': RackGroup}),
# Rack roles
path(r'rack-roles/', views.RackRoleListView.as_view(), name='rackrole_list'),
path(r'rack-roles/add/', views.RackRoleCreateView.as_view(), name='rackrole_add'),
path(r'rack-roles/import/', views.RackRoleBulkImportView.as_view(), name='rackrole_import'),
path(r'rack-roles/delete/', views.RackRoleBulkDeleteView.as_view(), name='rackrole_bulk_delete'),
path(r'rack-roles/<int:pk>/edit/', views.RackRoleEditView.as_view(), name='rackrole_edit'),
path(r'rack-roles/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rackrole_changelog', kwargs={'model': RackRole}),
# Rack reservations
path(r'rack-reservations/', views.RackReservationListView.as_view(), name='rackreservation_list'),
path(r'rack-reservations/edit/', views.RackReservationBulkEditView.as_view(), name='rackreservation_bulk_edit'),
path(r'rack-reservations/delete/', views.RackReservationBulkDeleteView.as_view(), name='rackreservation_bulk_delete'),
path(r'rack-reservations/<int:pk>/edit/', views.RackReservationEditView.as_view(), name='rackreservation_edit'),
path(r'rack-reservations/<int:pk>/delete/', views.RackReservationDeleteView.as_view(), name='rackreservation_delete'),
path(r'rack-reservations/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rackreservation_changelog', kwargs={'model': RackReservation}),
# Racks
path(r'racks/', views.RackListView.as_view(), name='rack_list'),
path(r'rack-elevations/', views.RackElevationListView.as_view(), name='rack_elevation_list'),
path(r'racks/add/', views.RackEditView.as_view(), name='rack_add'),
path(r'racks/import/', views.RackBulkImportView.as_view(), name='rack_import'),
path(r'racks/edit/', views.RackBulkEditView.as_view(), name='rack_bulk_edit'),
path(r'racks/delete/', views.RackBulkDeleteView.as_view(), name='rack_bulk_delete'),
path(r'racks/<int:pk>/', views.RackView.as_view(), name='rack'),
path(r'racks/<int:pk>/edit/', views.RackEditView.as_view(), name='rack_edit'),
path(r'racks/<int:pk>/delete/', views.RackDeleteView.as_view(), name='rack_delete'),
path(r'racks/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='rack_changelog', kwargs={'model': Rack}),
path(r'racks/<int:rack>/reservations/add/', views.RackReservationCreateView.as_view(), name='rack_add_reservation'),
path(r'racks/<int:object_id>/images/add/', ImageAttachmentEditView.as_view(), name='rack_add_image', kwargs={'model': Rack}),
# Manufacturers
path(r'manufacturers/', views.ManufacturerListView.as_view(), name='manufacturer_list'),
path(r'manufacturers/add/', views.ManufacturerCreateView.as_view(), name='manufacturer_add'),
path(r'manufacturers/import/', views.ManufacturerBulkImportView.as_view(), name='manufacturer_import'),
path(r'manufacturers/delete/', views.ManufacturerBulkDeleteView.as_view(), name='manufacturer_bulk_delete'),
path(r'manufacturers/<slug:slug>/edit/', views.ManufacturerEditView.as_view(), name='manufacturer_edit'),
path(r'manufacturers/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='manufacturer_changelog', kwargs={'model': Manufacturer}),
# Device types
path(r'device-types/', views.DeviceTypeListView.as_view(), name='devicetype_list'),
path(r'device-types/add/', views.DeviceTypeCreateView.as_view(), name='devicetype_add'),
path(r'device-types/import/', views.DeviceTypeBulkImportView.as_view(), name='devicetype_import'),
path(r'device-types/edit/', views.DeviceTypeBulkEditView.as_view(), name='devicetype_bulk_edit'),
path(r'device-types/delete/', views.DeviceTypeBulkDeleteView.as_view(), name='devicetype_bulk_delete'),
path(r'device-types/<int:pk>/', views.DeviceTypeView.as_view(), name='devicetype'),
path(r'device-types/<int:pk>/edit/', views.DeviceTypeEditView.as_view(), name='devicetype_edit'),
path(r'device-types/<int:pk>/delete/', views.DeviceTypeDeleteView.as_view(), name='devicetype_delete'),
path(r'device-types/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='devicetype_changelog', kwargs={'model': DeviceType}),
# Console port templates
path(r'device-types/<int:pk>/console-ports/add/', views.ConsolePortTemplateCreateView.as_view(), name='devicetype_add_consoleport'),
path(r'device-types/<int:pk>/console-ports/delete/', views.ConsolePortTemplateBulkDeleteView.as_view(), name='devicetype_delete_consoleport'),
# Console server port templates
path(r'device-types/<int:pk>/console-server-ports/add/', views.ConsoleServerPortTemplateCreateView.as_view(), name='devicetype_add_consoleserverport'),
path(r'device-types/<int:pk>/console-server-ports/delete/', views.ConsoleServerPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_consoleserverport'),
# Power port templates
path(r'device-types/<int:pk>/power-ports/add/', views.PowerPortTemplateCreateView.as_view(), name='devicetype_add_powerport'),
path(r'device-types/<int:pk>/power-ports/delete/', views.PowerPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_powerport'),
# Power outlet templates
path(r'device-types/<int:pk>/power-outlets/add/', views.PowerOutletTemplateCreateView.as_view(), name='devicetype_add_poweroutlet'),
path(r'device-types/<int:pk>/power-outlets/delete/', views.PowerOutletTemplateBulkDeleteView.as_view(), name='devicetype_delete_poweroutlet'),
# Interface templates
path(r'device-types/<int:pk>/interfaces/add/', views.InterfaceTemplateCreateView.as_view(), name='devicetype_add_interface'),
path(r'device-types/<int:pk>/interfaces/edit/', views.InterfaceTemplateBulkEditView.as_view(), name='devicetype_bulkedit_interface'),
path(r'device-types/<int:pk>/interfaces/delete/', views.InterfaceTemplateBulkDeleteView.as_view(), name='devicetype_delete_interface'),
# Front port templates
path(r'device-types/<int:pk>/front-ports/add/', views.FrontPortTemplateCreateView.as_view(), name='devicetype_add_frontport'),
path(r'device-types/<int:pk>/front-ports/delete/', views.FrontPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_frontport'),
# Rear port templates
path(r'device-types/<int:pk>/rear-ports/add/', views.RearPortTemplateCreateView.as_view(), name='devicetype_add_rearport'),
path(r'device-types/<int:pk>/rear-ports/delete/', views.RearPortTemplateBulkDeleteView.as_view(), name='devicetype_delete_rearport'),
# Device bay templates
path(r'device-types/<int:pk>/device-bays/add/', views.DeviceBayTemplateCreateView.as_view(), name='devicetype_add_devicebay'),
path(r'device-types/<int:pk>/device-bays/delete/', views.DeviceBayTemplateBulkDeleteView.as_view(), name='devicetype_delete_devicebay'),
# Device roles
path(r'device-roles/', views.DeviceRoleListView.as_view(), name='devicerole_list'),
path(r'device-roles/add/', views.DeviceRoleCreateView.as_view(), name='devicerole_add'),
path(r'device-roles/import/', views.DeviceRoleBulkImportView.as_view(), name='devicerole_import'),
path(r'device-roles/delete/', views.DeviceRoleBulkDeleteView.as_view(), name='devicerole_bulk_delete'),
path(r'device-roles/<slug:slug>/edit/', views.DeviceRoleEditView.as_view(), name='devicerole_edit'),
path(r'device-roles/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='devicerole_changelog', kwargs={'model': DeviceRole}),
# Platforms
path(r'platforms/', views.PlatformListView.as_view(), name='platform_list'),
path(r'platforms/add/', views.PlatformCreateView.as_view(), name='platform_add'),
path(r'platforms/import/', views.PlatformBulkImportView.as_view(), name='platform_import'),
path(r'platforms/delete/', views.PlatformBulkDeleteView.as_view(), name='platform_bulk_delete'),
path(r'platforms/<slug:slug>/edit/', views.PlatformEditView.as_view(), name='platform_edit'),
path(r'platforms/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='platform_changelog', kwargs={'model': Platform}),
# Devices
path(r'devices/', views.DeviceListView.as_view(), name='device_list'),
path(r'devices/add/', views.DeviceCreateView.as_view(), name='device_add'),
path(r'devices/import/', views.DeviceBulkImportView.as_view(), name='device_import'),
path(r'devices/import/child-devices/', views.ChildDeviceBulkImportView.as_view(), name='device_import_child'),
path(r'devices/edit/', views.DeviceBulkEditView.as_view(), name='device_bulk_edit'),
path(r'devices/delete/', views.DeviceBulkDeleteView.as_view(), name='device_bulk_delete'),
path(r'devices/<int:pk>/', views.DeviceView.as_view(), name='device'),
path(r'devices/<int:pk>/edit/', views.DeviceEditView.as_view(), name='device_edit'),
path(r'devices/<int:pk>/delete/', views.DeviceDeleteView.as_view(), name='device_delete'),
path(r'devices/<int:pk>/config-context/', views.DeviceConfigContextView.as_view(), name='device_configcontext'),
path(r'devices/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='device_changelog', kwargs={'model': Device}),
path(r'devices/<int:pk>/inventory/', views.DeviceInventoryView.as_view(), name='device_inventory'),
path(r'devices/<int:pk>/status/', views.DeviceStatusView.as_view(), name='device_status'),
path(r'devices/<int:pk>/lldp-neighbors/', views.DeviceLLDPNeighborsView.as_view(), name='device_lldp_neighbors'),
path(r'devices/<int:pk>/config/', views.DeviceConfigView.as_view(), name='device_config'),
path(r'devices/<int:pk>/add-secret/', secret_add, name='device_addsecret'),
path(r'devices/<int:device>/services/assign/', ServiceCreateView.as_view(), name='device_service_assign'),
path(r'devices/<int:object_id>/images/add/', ImageAttachmentEditView.as_view(), name='device_add_image', kwargs={'model': Device}),
# Console ports
path(r'devices/console-ports/add/', views.DeviceBulkAddConsolePortView.as_view(), name='device_bulk_add_consoleport'),
path(r'devices/<int:pk>/console-ports/add/', views.ConsolePortCreateView.as_view(), name='consoleport_add'),
path(r'devices/<int:pk>/console-ports/delete/', views.ConsolePortBulkDeleteView.as_view(), name='consoleport_bulk_delete'),
path(r'console-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='consoleport_connect', kwargs={'termination_a_type': ConsolePort}),
path(r'console-ports/<int:pk>/edit/', views.ConsolePortEditView.as_view(), name='consoleport_edit'),
path(r'console-ports/<int:pk>/delete/', views.ConsolePortDeleteView.as_view(), name='consoleport_delete'),
path(r'console-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='consoleport_trace', kwargs={'model': ConsolePort}),
# Console server ports
path(r'devices/console-server-ports/add/', views.DeviceBulkAddConsoleServerPortView.as_view(), name='device_bulk_add_consoleserverport'),
path(r'devices/<int:pk>/console-server-ports/add/', views.ConsoleServerPortCreateView.as_view(), name='consoleserverport_add'),
path(r'devices/<int:pk>/console-server-ports/edit/', views.ConsoleServerPortBulkEditView.as_view(), name='consoleserverport_bulk_edit'),
path(r'devices/<int:pk>/console-server-ports/delete/', views.ConsoleServerPortBulkDeleteView.as_view(), name='consoleserverport_bulk_delete'),
path(r'console-server-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='consoleserverport_connect', kwargs={'termination_a_type': ConsoleServerPort}),
path(r'console-server-ports/<int:pk>/edit/', views.ConsoleServerPortEditView.as_view(), name='consoleserverport_edit'),
path(r'console-server-ports/<int:pk>/delete/', views.ConsoleServerPortDeleteView.as_view(), name='consoleserverport_delete'),
path(r'console-server-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='consoleserverport_trace', kwargs={'model': ConsoleServerPort}),
path(r'console-server-ports/rename/', views.ConsoleServerPortBulkRenameView.as_view(), name='consoleserverport_bulk_rename'),
path(r'console-server-ports/disconnect/', views.ConsoleServerPortBulkDisconnectView.as_view(), name='consoleserverport_bulk_disconnect'),
# Power ports
path(r'devices/power-ports/add/', views.DeviceBulkAddPowerPortView.as_view(), name='device_bulk_add_powerport'),
path(r'devices/<int:pk>/power-ports/add/', views.PowerPortCreateView.as_view(), name='powerport_add'),
path(r'devices/<int:pk>/power-ports/delete/', views.PowerPortBulkDeleteView.as_view(), name='powerport_bulk_delete'),
path(r'power-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='powerport_connect', kwargs={'termination_a_type': PowerPort}),
path(r'power-ports/<int:pk>/edit/', views.PowerPortEditView.as_view(), name='powerport_edit'),
path(r'power-ports/<int:pk>/delete/', views.PowerPortDeleteView.as_view(), name='powerport_delete'),
path(r'power-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='powerport_trace', kwargs={'model': PowerPort}),
# Power outlets
path(r'devices/power-outlets/add/', views.DeviceBulkAddPowerOutletView.as_view(), name='device_bulk_add_poweroutlet'),
path(r'devices/<int:pk>/power-outlets/add/', views.PowerOutletCreateView.as_view(), name='poweroutlet_add'),
path(r'devices/<int:pk>/power-outlets/edit/', views.PowerOutletBulkEditView.as_view(), name='poweroutlet_bulk_edit'),
path(r'devices/<int:pk>/power-outlets/delete/', views.PowerOutletBulkDeleteView.as_view(), name='poweroutlet_bulk_delete'),
path(r'power-outlets/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='poweroutlet_connect', kwargs={'termination_a_type': PowerOutlet}),
path(r'power-outlets/<int:pk>/edit/', views.PowerOutletEditView.as_view(), name='poweroutlet_edit'),
path(r'power-outlets/<int:pk>/delete/', views.PowerOutletDeleteView.as_view(), name='poweroutlet_delete'),
path(r'power-outlets/<int:pk>/trace/', views.CableTraceView.as_view(), name='poweroutlet_trace', kwargs={'model': PowerOutlet}),
path(r'power-outlets/rename/', views.PowerOutletBulkRenameView.as_view(), name='poweroutlet_bulk_rename'),
path(r'power-outlets/disconnect/', views.PowerOutletBulkDisconnectView.as_view(), name='poweroutlet_bulk_disconnect'),
# Interfaces
path(r'devices/interfaces/add/', views.DeviceBulkAddInterfaceView.as_view(), name='device_bulk_add_interface'),
path(r'devices/<int:pk>/interfaces/add/', views.InterfaceCreateView.as_view(), name='interface_add'),
path(r'devices/<int:pk>/interfaces/edit/', views.InterfaceBulkEditView.as_view(), name='interface_bulk_edit'),
path(r'devices/<int:pk>/interfaces/delete/', views.InterfaceBulkDeleteView.as_view(), name='interface_bulk_delete'),
path(r'interfaces/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='interface_connect', kwargs={'termination_a_type': Interface}),
path(r'interfaces/<int:pk>/', views.InterfaceView.as_view(), name='interface'),
path(r'interfaces/<int:pk>/edit/', views.InterfaceEditView.as_view(), name='interface_edit'),
path(r'interfaces/<int:pk>/assign-vlans/', views.InterfaceAssignVLANsView.as_view(), name='interface_assign_vlans'),
path(r'interfaces/<int:pk>/delete/', views.InterfaceDeleteView.as_view(), name='interface_delete'),
path(r'interfaces/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='interface_changelog', kwargs={'model': Interface}),
path(r'interfaces/<int:pk>/trace/', views.CableTraceView.as_view(), name='interface_trace', kwargs={'model': Interface}),
path(r'interfaces/rename/', views.InterfaceBulkRenameView.as_view(), name='interface_bulk_rename'),
path(r'interfaces/disconnect/', views.InterfaceBulkDisconnectView.as_view(), name='interface_bulk_disconnect'),
# Front ports
# path(r'devices/front-ports/add/', views.DeviceBulkAddFrontPortView.as_view(), name='device_bulk_add_frontport'),
path(r'devices/<int:pk>/front-ports/add/', views.FrontPortCreateView.as_view(), name='frontport_add'),
path(r'devices/<int:pk>/front-ports/edit/', views.FrontPortBulkEditView.as_view(), name='frontport_bulk_edit'),
path(r'devices/<int:pk>/front-ports/delete/', views.FrontPortBulkDeleteView.as_view(), name='frontport_bulk_delete'),
path(r'front-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='frontport_connect', kwargs={'termination_a_type': FrontPort}),
path(r'front-ports/<int:pk>/edit/', views.FrontPortEditView.as_view(), name='frontport_edit'),
path(r'front-ports/<int:pk>/delete/', views.FrontPortDeleteView.as_view(), name='frontport_delete'),
path(r'front-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='frontport_trace', kwargs={'model': FrontPort}),
path(r'front-ports/rename/', views.FrontPortBulkRenameView.as_view(), name='frontport_bulk_rename'),
path(r'front-ports/disconnect/', views.FrontPortBulkDisconnectView.as_view(), name='frontport_bulk_disconnect'),
# Rear ports
# path(r'devices/rear-ports/add/', views.DeviceBulkAddRearPortView.as_view(), name='device_bulk_add_rearport'),
path(r'devices/<int:pk>/rear-ports/add/', views.RearPortCreateView.as_view(), name='rearport_add'),
path(r'devices/<int:pk>/rear-ports/edit/', views.RearPortBulkEditView.as_view(), name='rearport_bulk_edit'),
path(r'devices/<int:pk>/rear-ports/delete/', views.RearPortBulkDeleteView.as_view(), name='rearport_bulk_delete'),
path(r'rear-ports/<int:termination_a_id>/connect/<str:termination_b_type>/', views.CableCreateView.as_view(), name='rearport_connect', kwargs={'termination_a_type': RearPort}),
path(r'rear-ports/<int:pk>/edit/', views.RearPortEditView.as_view(), name='rearport_edit'),
path(r'rear-ports/<int:pk>/delete/', views.RearPortDeleteView.as_view(), name='rearport_delete'),
path(r'rear-ports/<int:pk>/trace/', views.CableTraceView.as_view(), name='rearport_trace', kwargs={'model': RearPort}),
path(r'rear-ports/rename/', views.RearPortBulkRenameView.as_view(), name='rearport_bulk_rename'),
path(r'rear-ports/disconnect/', views.RearPortBulkDisconnectView.as_view(), name='rearport_bulk_disconnect'),
# Device bays
path(r'devices/device-bays/add/', views.DeviceBulkAddDeviceBayView.as_view(), name='device_bulk_add_devicebay'),
path(r'devices/<int:pk>/bays/add/', views.DeviceBayCreateView.as_view(), name='devicebay_add'),
path(r'devices/<int:pk>/bays/delete/', views.DeviceBayBulkDeleteView.as_view(), name='devicebay_bulk_delete'),
path(r'device-bays/<int:pk>/edit/', views.DeviceBayEditView.as_view(), name='devicebay_edit'),
path(r'device-bays/<int:pk>/delete/', views.DeviceBayDeleteView.as_view(), name='devicebay_delete'),
path(r'device-bays/<int:pk>/populate/', views.DeviceBayPopulateView.as_view(), name='devicebay_populate'),
path(r'device-bays/<int:pk>/depopulate/', views.DeviceBayDepopulateView.as_view(), name='devicebay_depopulate'),
path(r'device-bays/rename/', views.DeviceBayBulkRenameView.as_view(), name='devicebay_bulk_rename'),
# Inventory items
path(r'inventory-items/', views.InventoryItemListView.as_view(), name='inventoryitem_list'),
path(r'inventory-items/import/', views.InventoryItemBulkImportView.as_view(), name='inventoryitem_import'),
path(r'inventory-items/edit/', views.InventoryItemBulkEditView.as_view(), name='inventoryitem_bulk_edit'),
path(r'inventory-items/delete/', views.InventoryItemBulkDeleteView.as_view(), name='inventoryitem_bulk_delete'),
path(r'inventory-items/<int:pk>/edit/', views.InventoryItemEditView.as_view(), name='inventoryitem_edit'),
path(r'inventory-items/<int:pk>/delete/', views.InventoryItemDeleteView.as_view(), name='inventoryitem_delete'),
path(r'devices/<int:device>/inventory-items/add/', views.InventoryItemEditView.as_view(), name='inventoryitem_add'),
# Cables
path(r'cables/', views.CableListView.as_view(), name='cable_list'),
path(r'cables/import/', views.CableBulkImportView.as_view(), name='cable_import'),
path(r'cables/edit/', views.CableBulkEditView.as_view(), name='cable_bulk_edit'),
path(r'cables/delete/', views.CableBulkDeleteView.as_view(), name='cable_bulk_delete'),
path(r'cables/<int:pk>/', views.CableView.as_view(), name='cable'),
path(r'cables/<int:pk>/edit/', views.CableEditView.as_view(), name='cable_edit'),
path(r'cables/<int:pk>/delete/', views.CableDeleteView.as_view(), name='cable_delete'),
path(r'cables/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='cable_changelog', kwargs={'model': Cable}),
# Console/power/interface connections (read-only)
path(r'console-connections/', views.ConsoleConnectionsListView.as_view(), name='console_connections_list'),
path(r'power-connections/', views.PowerConnectionsListView.as_view(), name='power_connections_list'),
path(r'interface-connections/', views.InterfaceConnectionsListView.as_view(), name='interface_connections_list'),
# Virtual chassis
path(r'virtual-chassis/', views.VirtualChassisListView.as_view(), name='virtualchassis_list'),
path(r'virtual-chassis/add/', views.VirtualChassisCreateView.as_view(), name='virtualchassis_add'),
path(r'virtual-chassis/<int:pk>/edit/', views.VirtualChassisEditView.as_view(), name='virtualchassis_edit'),
path(r'virtual-chassis/<int:pk>/delete/', views.VirtualChassisDeleteView.as_view(), name='virtualchassis_delete'),
path(r'virtual-chassis/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='virtualchassis_changelog', kwargs={'model': VirtualChassis}),
path(r'virtual-chassis/<int:pk>/add-member/', views.VirtualChassisAddMemberView.as_view(), name='virtualchassis_add_member'),
path(r'virtual-chassis-members/<int:pk>/delete/', views.VirtualChassisRemoveMemberView.as_view(), name='virtualchassis_remove_member'),
# Power panels
path(r'power-panels/', views.PowerPanelListView.as_view(), name='powerpanel_list'),
path(r'power-panels/add/', views.PowerPanelCreateView.as_view(), name='powerpanel_add'),
path(r'power-panels/import/', views.PowerPanelBulkImportView.as_view(), name='powerpanel_import'),
path(r'power-panels/delete/', views.PowerPanelBulkDeleteView.as_view(), name='powerpanel_bulk_delete'),
path(r'power-panels/<int:pk>/', views.PowerPanelView.as_view(), name='powerpanel'),
path(r'power-panels/<int:pk>/edit/', views.PowerPanelEditView.as_view(), name='powerpanel_edit'),
path(r'power-panels/<int:pk>/delete/', views.PowerPanelDeleteView.as_view(), name='powerpanel_delete'),
path(r'power-panels/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='powerpanel_changelog', kwargs={'model': PowerPanel}),
# Power feeds
path(r'power-feeds/', views.PowerFeedListView.as_view(), name='powerfeed_list'),
path(r'power-feeds/add/', views.PowerFeedEditView.as_view(), name='powerfeed_add'),
path(r'power-feeds/import/', views.PowerFeedBulkImportView.as_view(), name='powerfeed_import'),
path(r'power-feeds/edit/', views.PowerFeedBulkEditView.as_view(), name='powerfeed_bulk_edit'),
path(r'power-feeds/delete/', views.PowerFeedBulkDeleteView.as_view(), name='powerfeed_bulk_delete'),
path(r'power-feeds/<int:pk>/', views.PowerFeedView.as_view(), name='powerfeed'),
path(r'power-feeds/<int:pk>/edit/', views.PowerFeedEditView.as_view(), name='powerfeed_edit'),
path(r'power-feeds/<int:pk>/delete/', views.PowerFeedDeleteView.as_view(), name='powerfeed_delete'),
path(r'power-feeds/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='powerfeed_changelog', kwargs={'model': PowerFeed}),
]
| apache-2.0 | -6,724,790,281,098,075,000 | 85.026059 | 208 | 0.726732 | false |
insomnia-lab/calibre | src/calibre/ebooks/metadata/book/render.py | 1 | 8008 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from functools import partial
from calibre import prepare_string_for_xml, force_unicode
from calibre.ebooks.metadata import fmt_sidx
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
from calibre.constants import filesystem_encoding
from calibre.library.comments import comments_to_html
from calibre.utils.icu import sort_key
from calibre.utils.formatter import EvalFormatter
from calibre.utils.date import is_date_undefined
from calibre.utils.localization import calibre_langcode_to_name
default_sort = ('title', 'title_sort', 'authors', 'author_sort', 'series', 'rating', 'pubdate', 'tags', 'publisher', 'identifiers')
def field_sort(mi, name):
try:
title = mi.metadata_for_field(name)['name']
except:
title = 'zzz'
return {x:(i, None) for i, x in enumerate(default_sort)}.get(name, (10000, sort_key(title)))
def displayable_field_keys(mi):
for k in mi.all_field_keys():
try:
m = mi.metadata_for_field(k)
except:
continue
if (
m is not None and m['kind'] == 'field' and m['datatype'] is not None and
k not in ('au_map', 'marked', 'ondevice', 'cover', 'series_sort') and
not k.endswith('_index')
):
yield k
def get_field_list(mi):
for field in sorted(displayable_field_keys(mi), key=partial(field_sort, mi)):
yield field, True
def mi_to_html(mi, field_list=None, default_author_link=None, use_roman_numbers=True, rating_font='Liberation Serif'):
if field_list is None:
field_list = get_field_list(mi)
ans = []
comment_fields = []
isdevice = not hasattr(mi, 'id')
row = u'<td class="title">%s</td><td class="value">%s</td>'
p = prepare_string_for_xml
a = partial(prepare_string_for_xml, attribute=True)
for field in (field for field, display in field_list if display):
try:
metadata = mi.metadata_for_field(field)
except:
continue
if not metadata:
continue
if field == 'sort':
field = 'title_sort'
if metadata['datatype'] == 'bool':
isnull = mi.get(field) is None
else:
isnull = mi.is_null(field)
if isnull:
continue
name = metadata['name']
if not name:
name = field
name += ':'
if metadata['datatype'] == 'comments' or field == 'comments':
val = getattr(mi, field)
if val:
val = force_unicode(val)
comment_fields.append(comments_to_html(val))
elif metadata['datatype'] == 'rating':
val = getattr(mi, field)
if val:
val = val/2.0
ans.append((field,
u'<td class="title">%s</td><td class="rating value" '
'style=\'font-family:"%s"\'>%s</td>'%(
name, rating_font, u'\u2605'*int(val))))
elif metadata['datatype'] == 'composite' and \
metadata['display'].get('contains_html', False):
val = getattr(mi, field)
if val:
val = force_unicode(val)
ans.append((field,
row % (name, comments_to_html(val))))
elif field == 'path':
if mi.path:
path = force_unicode(mi.path, filesystem_encoding)
scheme = u'devpath' if isdevice else u'path'
url = prepare_string_for_xml(path if isdevice else
unicode(mi.id), True)
pathstr = _('Click to open')
extra = ''
if isdevice:
durl = url
if durl.startswith('mtp:::'):
durl = ':::'.join((durl.split(':::'))[2:])
extra = '<br><span style="font-size:smaller">%s</span>'%(
prepare_string_for_xml(durl))
link = u'<a href="%s:%s" title="%s">%s</a>%s' % (scheme, url,
prepare_string_for_xml(path, True), pathstr, extra)
ans.append((field, row % (name, link)))
elif field == 'formats':
if isdevice:
continue
path = ''
if mi.path:
h, t = os.path.split(mi.path)
path = '/'.join((os.path.basename(h), t))
data = ({
'fmt':x, 'path':a(path or ''), 'fname':a(mi.format_files.get(x, '')),
'ext':x.lower(), 'id':mi.id
} for x in mi.formats)
fmts = [u'<a title="{path}/{fname}.{ext}" href="format:{id}:{fmt}">{fmt}</a>'.format(**x) for x in data]
ans.append((field, row % (name, u', '.join(fmts))))
elif field == 'identifiers':
urls = urls_from_identifiers(mi.identifiers)
links = [u'<a href="%s" title="%s:%s">%s</a>' % (a(url), a(id_typ), a(id_val), p(name))
for name, id_typ, id_val, url in urls]
links = u', '.join(links)
if links:
ans.append((field, row % (_('Ids')+':', links)))
elif field == 'authors' and not isdevice:
authors = []
formatter = EvalFormatter()
for aut in mi.authors:
link = ''
if mi.author_link_map[aut]:
link = mi.author_link_map[aut]
elif default_author_link:
vals = {'author': aut.replace(' ', '+')}
try:
vals['author_sort'] = mi.author_sort_map[aut].replace(' ', '+')
except:
vals['author_sort'] = aut.replace(' ', '+')
link = formatter.safe_format(
default_author_link, vals, '', vals)
aut = p(aut)
if link:
authors.append(u'<a calibre-data="authors" title="%s" href="%s">%s</a>'%(a(link), a(link), aut))
else:
authors.append(aut)
ans.append((field, row % (name, u' & '.join(authors))))
elif field == 'languages':
if not mi.languages:
continue
names = filter(None, map(calibre_langcode_to_name, mi.languages))
ans.append((field, row % (name, u', '.join(names))))
else:
val = mi.format_field(field)[-1]
if val is None:
continue
val = p(val)
if metadata['datatype'] == 'series':
sidx = mi.get(field+'_index')
if sidx is None:
sidx = 1.0
val = _('Book %(sidx)s of <span class="series_name">%(series)s</span>')%dict(
sidx=fmt_sidx(sidx, use_roman=use_roman_numbers),
series=p(getattr(mi, field)))
elif metadata['datatype'] == 'datetime':
aval = getattr(mi, field)
if is_date_undefined(aval):
continue
ans.append((field, row % (name, val)))
dc = getattr(mi, 'device_collections', [])
if dc:
dc = u', '.join(sorted(dc, key=sort_key))
ans.append(('device_collections',
row % (_('Collections')+':', dc)))
def classname(field):
try:
dt = mi.metadata_for_field(field)['datatype']
except:
dt = 'text'
return 'datatype_%s'%dt
ans = [u'<tr id="%s" class="%s">%s</tr>'%(field.replace('#', '_'),
classname(field), html) for field, html in ans]
# print '\n'.join(ans)
return u'<table class="fields">%s</table>'%(u'\n'.join(ans)), comment_fields
| gpl-3.0 | 1,372,773,711,632,978,400 | 39.649746 | 131 | 0.49975 | false |
pferreir/indico-backup | indico/web/flask/blueprints/legacy.py | 1 | 21964 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico. If not, see <http://www.gnu.org/licenses/>.
import MaKaC.webinterface.rh.xmlGateway as mod_rh_xmlGateway
from indico.web.flask.wrappers import IndicoBlueprint
legacy = IndicoBlueprint('legacy', __name__)
# Routes for xmlGateway.py
legacy.add_url_rule('/xmlGateway.py',
'xmlGateway',
mod_rh_xmlGateway.RHLoginStatus,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/getCategoryInfo',
'xmlGateway-getCategoryInfo',
mod_rh_xmlGateway.RHCategInfo,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/getStatsIndico',
'xmlGateway-getStatsIndico',
mod_rh_xmlGateway.RHStatsIndico,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/loginStatus',
'xmlGateway-loginStatus',
mod_rh_xmlGateway.RHLoginStatus,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/signIn',
'xmlGateway-signIn',
mod_rh_xmlGateway.RHSignIn,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/signOut',
'xmlGateway-signOut',
mod_rh_xmlGateway.RHSignOut,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/webcastForthcomingEvents',
'xmlGateway-webcastForthcomingEvents',
mod_rh_xmlGateway.RHWebcastForthcomingEvents,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/webcastOnAir',
'xmlGateway-webcastOnAir',
mod_rh_xmlGateway.RHWebcastOnAir,
methods=('GET', 'POST'))
# Legacy endpoints defined in htdocs/*.py files (which need compatibility routes)
# Note: When removing/renaming endpoints, feel free to remove them in here, too, but
# it's not absolutely necessary - if there's no non-legacy endpoint with that name
# the entry in here simply does nothing.
legacy_endpoints = {
'about', 'abstractDisplay', 'abstractDisplay-getAttachedFile', 'abstractDisplay-pdf', 'abstractManagment',
'abstractManagment-abstractToPDF', 'abstractManagment-accept', 'abstractManagment-acceptMultiple',
'abstractManagment-backToSubmitted', 'abstractManagment-changeTrack', 'abstractManagment-comments',
'abstractManagment-directAccess', 'abstractManagment-editComment', 'abstractManagment-editData',
'abstractManagment-markAsDup', 'abstractManagment-mergeInto', 'abstractManagment-newComment',
'abstractManagment-notifLog', 'abstractManagment-orderByRating', 'abstractManagment-propToAcc',
'abstractManagment-propToRej', 'abstractManagment-reject', 'abstractManagment-rejectMultiple',
'abstractManagment-remComment', 'abstractManagment-trackProposal', 'abstractManagment-unMarkAsDup',
'abstractManagment-unmerge', 'abstractManagment-withdraw', 'abstractManagment-xml', 'abstractModify',
'abstractReviewing-notifTpl', 'abstractReviewing-notifTplCondNew', 'abstractReviewing-notifTplCondRem',
'abstractReviewing-notifTplDisplay', 'abstractReviewing-notifTplDown', 'abstractReviewing-notifTplEdit',
'abstractReviewing-notifTplNew', 'abstractReviewing-notifTplPreview', 'abstractReviewing-notifTplRem',
'abstractReviewing-notifTplUp', 'abstractReviewing-reviewingSetup', 'abstractReviewing-reviewingTeam',
'abstractsManagment', 'abstractsManagment-abstractsActions', 'abstractsManagment-mergeAbstracts',
'abstractsManagment-newAbstract', 'abstractsManagment-participantList', 'abstractSubmission',
'abstractSubmission-confirmation', 'abstractTools', 'abstractTools-delete', 'abstractWithdraw',
'abstractWithdraw-recover', 'adminAnnouncement', 'adminAnnouncement-save', 'adminConferenceStyles', 'adminLayout',
'adminLayout-addStyle', 'adminLayout-deleteStyle', 'adminLayout-saveSocial', 'adminLayout-saveTemplateSet',
'adminLayout-setDefaultPDFOptions', 'adminLayout-styles', 'adminList',
'adminList-switchNewsActive', 'adminMaintenance', 'adminMaintenance-pack', 'adminMaintenance-performPack',
'adminMaintenance-performTmpCleanup', 'adminMaintenance-tmpCleanup', 'adminPlugins', 'adminPlugins-clearAllInfo',
'adminPlugins-reload', 'adminPlugins-reloadAll', 'adminPlugins-saveOptionReloadAll',
'adminPlugins-savePluginOptions', 'adminPlugins-savePluginTypeOptions', 'adminPlugins-toggleActive',
'adminPlugins-toggleActivePluginType', 'adminProtection', 'adminServices-analytics', 'adminServices-apiKeys',
'adminServices-apiOptions', 'adminServices-apiOptionsSet', 'adminServices-ipbasedacl',
'adminServices-ipbasedacl_fagrant', 'adminServices-ipbasedacl_farevoke', 'adminServices-oauthAuthorized',
'adminServices-oauthConsumers', 'adminServices-saveAnalytics', 'adminServices-webcast',
'adminServices-webcastAddChannel', 'adminServices-webcastAddOnAir', 'adminServices-webcastAddStream',
'adminServices-webcastAddWebcast', 'adminServices-webcastArchive', 'adminServices-webcastArchiveWebcast',
'adminServices-webcastManualSynchronization', 'adminServices-webcastModifyChannel',
'adminServices-webcastMoveChannelDown', 'adminServices-webcastMoveChannelUp', 'adminServices-webcastRemoveChannel',
'adminServices-webcastRemoveFromAir', 'adminServices-webcastRemoveStream', 'adminServices-webcastRemoveWebcast',
'adminServices-webcastSaveWebcastSynchronizationURL', 'adminServices-webcastSetup',
'adminServices-webcastSwitchChannel', 'adminServices-webcastUnArchiveWebcast', 'adminSystem', 'adminSystem-modify',
'adminUpcomingEvents', 'assignContributions', 'assignContributions-downloadAcceptedPapers', 'badgeTemplates',
'badgeTemplates-badgeDesign', 'badgeTemplates-badgePrinting', 'categoryAC', 'categoryAC-setVisibility',
'categoryConfCreationControl-setCreateConferenceControl', 'categoryConfCreationControl-setNotifyCreation',
'categoryCreation', 'categoryCreation-create', 'categoryDataModification', 'categoryDataModification-modify',
'categoryDataModification-tasksOption', 'categoryDisplay', 'categoryDisplay-atom', 'categoryDisplay-getIcon',
'categoryDisplay-ical', 'categoryDisplay-rss', 'categoryFiles', 'categoryFiles-addMaterial', 'categoryMap',
'categoryModification', 'categoryModification-actionConferences', 'categoryModification-actionSubCategs',
'categoryStatistics', 'categoryTasks', 'categoryTasks-taskAction', 'categoryTools', 'categoryTools-delete',
'categOverview', 'categOverview-rss', 'changeLang', 'confAbstractBook', 'confAuthorIndex', 'confDisplayEvaluation',
'confDisplayEvaluation-display', 'confDisplayEvaluation-modif', 'confDisplayEvaluation-signIn',
'confDisplayEvaluation-submit', 'confDisplayEvaluation-submitted', 'conferenceCFA', 'conferenceCreation',
'conferenceCreation-createConference', 'conferenceDisplay', 'conferenceDisplay-abstractBook',
'conferenceDisplay-abstractBookLatex', 'conferenceDisplay-accessKey', 'conferenceDisplay-getCSS',
'conferenceDisplay-getLogo', 'conferenceDisplay-getPic', 'conferenceDisplay-ical', 'conferenceDisplay-marcxml',
'conferenceDisplay-matPkg', 'conferenceDisplay-next', 'conferenceDisplay-performMatPkg', 'conferenceDisplay-prev',
'conferenceDisplay-xml', 'conferenceModification', 'conferenceModification-addContribType',
'conferenceModification-close', 'conferenceModification-closeModifKey', 'conferenceModification-data',
'conferenceModification-dataPerform', 'conferenceModification-editContribType',
'conferenceModification-managementAccess', 'conferenceModification-materialsAdd',
'conferenceModification-materialsShow', 'conferenceModification-modifKey', 'conferenceModification-open',
'conferenceModification-removeContribType',
'conferenceModification-screenDates', 'conferenceOtherViews', 'conferenceProgram', 'conferenceProgram-pdf',
'conferenceTimeTable', 'conferenceTimeTable-customizePdf', 'conferenceTimeTable-pdf', 'confListContribToJudge',
'confListContribToJudge-asEditor', 'confListContribToJudge-asReviewer', 'confLogin', 'confLogin-active',
'confLogin-disabledAccount', 'confLogin-sendActivation', 'confLogin-sendLogin', 'confLogin-unactivatedAccount',
'confModBOA', 'confModBOA-toogleShowIds', 'confModifAC', 'confModifAC-grantModificationToAllConveners',
'confModifAC-grantSubmissionToAllSpeakers', 'confModifAC-modifySessionCoordRights',
'confModifAC-removeAllSubmissionRights', 'confModifAC-setVisibility', 'confModifCFA', 'confModifCFA-absFieldDown',
'confModifCFA-absFieldUp', 'confModifCFA-abstractFields', 'confModifCFA-changeStatus',
'confModifCFA-makeTracksMandatory', 'confModifCFA-modifyData', 'confModifCFA-performModifyData',
'confModifCFA-preview', 'confModifCFA-removeAbstractField', 'confModifCFA-switchAttachFiles',
'confModifCFA-switchMultipleTracks', 'confModifCFA-switchSelectSpeakerMandatory',
'confModifCFA-switchShowAttachedFiles', 'confModifCFA-switchShowSelectSpeaker', 'confModifContribList',
'confModifContribList-contribQuickAccess', 'confModifContribList-contribsActions',
'confModifContribList-contribsToPDFMenu', 'confModifContribList-matPkg', 'confModifContribList-moveToSession',
'confModifContribList-participantList', 'confModifContribList-proceedings', 'confModifDisplay',
'confModifDisplay-addLink', 'confModifDisplay-addPage', 'confModifDisplay-addSpacer',
'confModifDisplay-confHeader', 'confModifDisplay-custom', 'confModifDisplay-downLink',
'confModifDisplay-formatTitleBgColor', 'confModifDisplay-formatTitleTextColor', 'confModifDisplay-menu',
'confModifDisplay-modifyData', 'confModifDisplay-modifySystemData', 'confModifDisplay-previewCSS',
'confModifDisplay-removeCSS', 'confModifDisplay-removeLink', 'confModifDisplay-removeLogo',
'confModifDisplay-resources', 'confModifDisplay-saveCSS', 'confModifDisplay-saveLogo', 'confModifDisplay-savePic',
'confModifDisplay-tickerTapeAction', 'confModifDisplay-toggleHomePage', 'confModifDisplay-toggleLinkStatus',
'confModifDisplay-toggleNavigationBar', 'confModifDisplay-toggleSearch', 'confModifDisplay-upLink',
'confModifDisplay-useCSS', 'confModifEpayment', 'confModifEpayment-changeStatus', 'confModifEpayment-dataModif',
'confModifEpayment-enableSection', 'confModifEpayment-modifModule', 'confModifEpayment-performDataModif',
'confModifEvaluation', 'confModifEvaluation-changeStatus', 'confModifEvaluation-dataModif',
'confModifEvaluation-edit', 'confModifEvaluation-editPerformChanges', 'confModifEvaluation-performDataModif',
'confModifEvaluation-preview', 'confModifEvaluation-results', 'confModifEvaluation-resultsOptions',
'confModifEvaluation-resultsSubmittersActions', 'confModifEvaluation-setup', 'confModifEvaluation-specialAction',
'confModifListings-allSpeakers', 'confModifListings-allSpeakersAction', 'confModifLog', 'confModifParticipants',
'confModifParticipants-action', 'confModifParticipants-declinedParticipants', 'confModifParticipants-invitation',
'confModifParticipants-pendingParticipants', 'confModifParticipants-refusal', 'confModifParticipants-setup',
'confModifParticipants-statistics', 'confModifPendingQueues', 'confModifPendingQueues-actionConfSubmitters',
'confModifPendingQueues-actionCoordinators', 'confModifPendingQueues-actionManagers',
'confModifPendingQueues-actionSubmitters', 'confModifProgram', 'confModifProgram-addTrack',
'confModifProgram-deleteTracks', 'confModifProgram-moveTrackDown', 'confModifProgram-moveTrackUp',
'confModifProgram-performAddTrack', 'confModifRegistrants', 'confModifRegistrants-action',
'confModifRegistrants-getAttachedFile', 'confModifRegistrants-modification',
'confModifRegistrants-modifyAccommodation', 'confModifRegistrants-modifyMiscInfo',
'confModifRegistrants-modifyReasonParticipation', 'confModifRegistrants-modifySessions',
'confModifRegistrants-modifySocialEvents', 'confModifRegistrants-modifyStatuses',
'confModifRegistrants-modifyTransaction', 'confModifRegistrants-newRegistrant',
'confModifRegistrants-peformModifyTransaction', 'confModifRegistrants-performModifyAccommodation',
'confModifRegistrants-performModifyMiscInfo', 'confModifRegistrants-performModifyReasonParticipation',
'confModifRegistrants-performModifySessions', 'confModifRegistrants-performModifySocialEvents',
'confModifRegistrants-performModifyStatuses', 'confModifRegistrants-remove', 'confModifRegistrationForm',
'confModifRegistrationForm-actionStatuses', 'confModifRegistrationForm-changeStatus',
'confModifRegistrationForm-dataModif', 'confModifRegistrationForm-modifStatus',
'confModifRegistrationForm-performDataModif', 'confModifRegistrationForm-performModifStatus',
'confModifRegistrationPreview', 'confModifReviewing-access', 'confModifReviewing-downloadTemplate',
'confModifReviewing-paperSetup', 'confModifReviewing-setTemplate', 'confModifReviewingControl',
'confModifSchedule', 'confModifSchedule-edit', 'confModifSchedule-reschedule', 'confModifTools',
'confModifTools-addAlarm', 'confModifTools-allSessionsConveners', 'confModifTools-allSessionsConvenersAction',
'confModifTools-badgeDesign', 'confModifTools-badgeGetBackground', 'confModifTools-badgePrinting',
'confModifTools-badgePrintingPDF', 'confModifTools-badgeSaveBackground', 'confModifTools-clone',
'confModifTools-delete', 'confModifTools-deleteAlarm', 'confModifTools-displayAlarm', 'confModifTools-matPkg',
'confModifTools-modifyAlarm', 'confModifTools-offline', 'confModifTools-performCloning',
'confModifTools-performMatPkg', 'confModifTools-posterDesign', 'confModifTools-posterGetBackground',
'confModifTools-posterPrinting', 'confModifTools-posterPrintingPDF', 'confModifTools-posterSaveBackground',
'confModifTools-saveAlarm', 'confModifTools-sendAlarmNow', 'confModifUserCompetences',
'confRegistrantsDisplay-list', 'confRegistrationFormDisplay', 'confRegistrationFormDisplay-conditions',
'confRegistrationFormDisplay-confirmBooking', 'confRegistrationFormDisplay-confirmBookingDone',
'confRegistrationFormDisplay-creation', 'confRegistrationFormDisplay-creationDone',
'confRegistrationFormDisplay-display', 'confRegistrationFormDisplay-modify',
'confRegistrationFormDisplay-performModify', 'confRegistrationFormDisplay-signIn', 'confSpeakerIndex', 'confUser',
'confUser-created', 'confUser-userExists', 'contact', 'contribAuthorDisplay', 'contributionAC',
'contributionAC-setVisibility', 'contributionDisplay', 'contributionDisplay-ical', 'contributionDisplay-marcxml',
'contributionDisplay-pdf', 'contributionDisplay-xml', 'contributionEditingJudgement', 'contributionGiveAdvice',
'contributionListDisplay', 'contributionListDisplay-contributionsToPDF', 'contributionModification',
'contributionModification-browseMaterial', 'contributionModification-data', 'contributionModification-materials',
'contributionModification-materialsAdd', 'contributionModification-modifData', 'contributionModification-pdf',
'contributionModification-setSession', 'contributionModification-setTrack', 'contributionModification-withdraw',
'contributionModification-xml', 'contributionModifSubCont', 'contributionModifSubCont-actionSubContribs',
'contributionModifSubCont-add', 'contributionModifSubCont-create', 'contributionReviewing',
'contributionReviewing-assignEditing', 'contributionReviewing-assignReferee',
'contributionReviewing-assignReviewing', 'contributionReviewing-contributionReviewingJudgements',
'contributionReviewing-contributionReviewingMaterials', 'contributionReviewing-removeAssignEditing',
'contributionReviewing-removeAssignReferee', 'contributionReviewing-removeAssignReviewing',
'contributionReviewing-reviewingHistory', 'contributionTools', 'contributionTools-delete', 'domainCreation',
'domainCreation-create', 'domainDataModification', 'domainDataModification-modify', 'domainDetails', 'domainList',
'EMail', 'EMail-send', 'EMail-sendcontribparticipants', 'EMail-sendconvener', 'EMail-sendreg', 'errors',
'generalInfoModification', 'generalInfoModification-update', 'getConvertedFile', 'getFile-access',
'getFile-accessKey', 'getFile-flash', 'getFile-offlineEvent', 'getFile-wmv', 'groupDetails', 'groupList',
'groupModification', 'groupModification-update', 'groupRegistration', 'groupRegistration-update', 'help',
'identityCreation', 'identityCreation-changePassword', 'identityCreation-create', 'identityCreation-remove',
'index', 'internalPage', 'JSContent-getVars', 'logOut', 'materialDisplay', 'materialDisplay-accessKey',
'myconference', 'myconference-myContributions', 'myconference-mySessions', 'myconference-myTracks', 'news',
'oauth-access_token', 'oauth-authorize', 'oauth-authorize_consumer', 'oauth-request_token', 'oauth-thirdPartyAuth',
'oauth-userThirdPartyAuth', 'paperReviewingDisplay', 'paperReviewingDisplay-downloadTemplate',
'paperReviewingDisplay-uploadPaper', 'payment', 'posterTemplates', 'posterTemplates-posterDesign',
'posterTemplates-posterPrinting', 'resetSessionTZ', 'roomBooking', 'roomBooking-acceptBooking',
'roomBooking-admin', 'roomBooking-adminLocation', 'roomBooking-bookingDetails',
'roomBooking-bookingForm', 'roomBooking-bookingList', 'roomBooking-bookRoom', 'roomBooking-cancelBooking',
'roomBooking-cancelBookingOccurrence', 'roomBooking-cloneBooking',
'roomBooking-deleteBooking', 'roomBooking-deleteCustomAttribute', 'roomBooking-deleteEquipment',
'roomBooking-deleteLocation', 'roomBooking-mapOfRooms', 'roomBooking-mapOfRoomsWidget',
'roomBooking-rejectBooking', 'roomBooking-rejectBookingOccurrence',
'roomBooking-roomDetails', 'roomBooking-roomList', 'roomBooking-roomStats',
'roomBooking-saveBooking', 'roomBooking-saveCustomAttributes', 'roomBooking-saveEquipment',
'roomBooking-saveLocation', 'roomBooking-search4Bookings', 'roomBooking-search4Rooms',
'roomBooking-setDefaultLocation', 'roomBooking-statement', 'roomBookingPluginAdmin',
'roomBookingPluginAdmin-zodbSave', 'roomMapper',
'roomMapper-creation', 'roomMapper-details', 'roomMapper-modify', 'roomMapper-performCreation',
'roomMapper-performModify', 'sessionDisplay', 'sessionDisplay-ical', 'sessionModifAC',
'sessionModifAC-setVisibility', 'sessionModifComm', 'sessionModifComm-edit', 'sessionModification',
'sessionModification-addContribs', 'sessionModification-close', 'sessionModification-contribAction',
'sessionModification-contribList', 'sessionModification-contribQuickAccess', 'sessionModification-contribsToPDF',
'sessionModification-editContrib', 'sessionModification-materials', 'sessionModification-materialsAdd',
'sessionModification-modify', 'sessionModification-open', 'sessionModification-participantList',
'sessionModifSchedule', 'sessionModifSchedule-fitSlot', 'sessionModifSchedule-slotCalc', 'sessionModifTools',
'sessionModifTools-delete', 'signIn', 'signIn-active', 'signIn-disabledAccount', 'signIn-sendActivation',
'signIn-sendLogin', 'signIn-sso', 'signIn-unactivatedAccount', 'subContributionDisplay',
'subContributionDisplay-marcxml', 'subContributionModification', 'subContributionModification-data',
'subContributionModification-materials', 'subContributionModification-materialsAdd',
'subContributionModification-modifData', 'subContributionTools', 'subContributionTools-delete', 'taskManager',
'trackAbstractModif', 'trackAbstractModif-abstractAction', 'trackAbstractModif-abstractToPDF',
'trackAbstractModif-accept', 'trackAbstractModif-commentEdit', 'trackAbstractModif-commentNew',
'trackAbstractModif-commentRem', 'trackAbstractModif-comments', 'trackAbstractModif-directAccess',
'trackAbstractModif-markAsDup', 'trackAbstractModif-proposeForOtherTracks', 'trackAbstractModif-proposeToBeAcc',
'trackAbstractModif-proposeToBeRej', 'trackAbstractModif-reject', 'trackAbstractModif-unMarkAsDup',
'trackModContribList', 'trackModContribList-contribAction', 'trackModContribList-contribQuickAccess',
'trackModContribList-contribsToPDF', 'trackModContribList-participantList', 'trackModifAbstracts',
'trackModifCoordination', 'trackModification', 'trackModification-modify', 'trackModification-performModify',
'updateNews', 'userAbstracts', 'userAbstracts-pdf', 'userAPI', 'userAPI-block', 'userAPI-create', 'userAPI-delete',
'userBaskets', 'userDashboard', 'userDetails', 'userList', 'userManagement',
'userManagement-switchAuthorisedAccountCreation', 'userManagement-switchModerateAccountCreation',
'userManagement-switchNotifyAccountCreation', 'userMerge', 'userPreferences', 'userRegistration',
'userRegistration-active', 'userRegistration-created', 'userRegistration-disable', 'userRegistration-UserExist',
'wcalendar', 'wcalendar-select', 'xmlGateway', 'xmlGateway-getCategoryInfo', 'xmlGateway-getStatsIndico',
'xmlGateway-getStatsRoomBooking', 'xmlGateway-loginStatus', 'xmlGateway-signIn', 'xmlGateway-signOut',
'xmlGateway-webcastForthcomingEvents', 'xmlGateway-webcastOnAir'
}
| gpl-3.0 | -6,249,722,430,769,871,000 | 83.153257 | 119 | 0.785103 | false |
merc-devel/merc | merc/features/rfc1459/nick.py | 1 | 1773 | import regex
from merc import errors
from merc import feature
from merc import message
MAX_NICKNAME_LENGTH = 12
NICKNAME_REGEX = regex.compile(r"^[\p{L}\p{So}_\[\]\\^{}|`][\p{L}\p{So}\p{N}_\[\]\\^{}|`-]*$")
class NickFeature(feature.Feature):
NAME = __name__
install = NickFeature.install
class _Nick(message.Command):
def handle_for(self, app, user, prefix):
target = self.get_target(app, user)
old_hostmask = target.hostmask
if NICKNAME_REGEX.match(self.nickname) is None or \
len(self.nickname) > MAX_NICKNAME_LENGTH:
raise errors.ErroneousNickname
app.users.rename(target, self.nickname)
if target.is_registered:
app.network.user_broadcast(target, old_hostmask, Nick(self.nickname))
target.send(old_hostmask, Nick(self.nickname))
else:
target.registration_latch.decrement()
@NickFeature.register_user_command
class Nick(_Nick):
NAME = "NICK"
MIN_ARITY = 1
def __init__(self, nickname, *args):
self.nickname = nickname
def as_command_params(self):
return [self.nickname]
def get_target(self, app, user):
return user
@NickFeature.register_user_command
class SANick(_Nick):
NAME = "SANICK"
MIN_ARITY = 2
def __init__(self, target, nickname, *args):
self.target = target
self.nickname = nickname
def get_target(self, app, user):
return app.users.get(self.target)
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
user.check_is_irc_operator()
super().handle_for(app, user, prefix)
@NickFeature.hook("server.isupport.modify")
def modify_isupport(app, isupport):
isupport["NICKLEN"] = MAX_NICKNAME_LENGTH
@NickFeature.hook("user.connect")
def on_connect(app, user):
user.registration_latch.increment()
| mit | 8,278,732,483,026,297,000 | 22.025974 | 94 | 0.684715 | false |
yang0110/comPy | modulation.py | 1 | 10528 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import preprocessing
import pylab
import itertools
from line_coding import polar_nrz
import math
from scipy import signal
from scipy.integrate import simps
import numpy.matlib
from compy.bin2gray import *
# m: order of modulation
# input_bits_array: np.array of binary bits
__all__=['constellation','bpsk_mod','mpsk_mod','mpsk_ref_symbol','qam_ref_symbol','pam_ref_symbol','qam_mod','mpsk_dem',
'qam_dem','pam_mod','pam_dem','spatial_modulation_qam','sm_constellation','generalized_spatial_modulation_qam','gsm_ref_symbol_combination',
'gsm_look_up_table','mimo_look_up_table','ncr','Ber']
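# Quick usage sketch (illustrative only; assumes the bit/Gray helpers imported
# from compy.bin2gray above are available):
#   bits = np.random.randint(2, size=400)
#   tx = qam_mod(bits, 16, type='gray')   # map bits onto 16-QAM symbols
#   rx_ints = qam_dem(tx, 16)             # nearest-symbol decisions
#   err = np.sum(rx_ints != bits_to_gray_to_int(bits, 16))  # symbol errors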
def constellation(data):
re=np.real(data)
im=np.imag(data)
plt.scatter(re,im,s=50)
plt.xlim(min(re)-1,max(re)+1)
plt.ylim(min(im)-1,max(im)+1)
plt.title('qma_%s'%(len(data)))
plt.show()
def bpsk_mod(input_bits_array):
bpsk=2*np.round(input_bits_array)-1
return bpsk
# output bits array [-1,1....]
def mpsk_mod(input_bits_array,m):
# m_array=[2.0,4.0,8.0,16.0]
m=float(m)
input_ints=bits_to_binary_to_int(input_bits_array,m)
I=np.cos(input_ints/m*2*np.pi+np.pi/4.0)
Q=np.sin(input_ints/m*2*np.pi+np.pi/4.0)
mpsk=I+1j*Q
return mpsk
def mpsk_ref_symbol(m):
m=float(m)
ref_bits=np.arange(m)
s_i=np.cos(ref_bits/m*2*np.pi+np.pi/4.0)
s_q=np.sin(ref_bits/m*2*np.pi+np.pi/4.0)
mpsk_ref_symbol=s_i+1j*s_q
return mpsk_ref_symbol
def qam_ref_symbol(m):
if m==8:
m=16
m=float(m)
ref_values=np.arange(1,np.sqrt(m))
ref_values=ref_values[0::2]
v1=ref_values
v2=ref_values*(-1)
ref=np.hstack((v1,v2))
ref_com=np.array(list(itertools.product(ref,repeat=2)))
ref_symbol=ref_com[:,0]+1j*ref_com[:,1]
qam=ref_symbol[np.where(abs(np.imag(ref_symbol))<=1)]
elif m==32:
m=64
m=float(m)
ref_values=np.arange(1,np.sqrt(m))
ref_values=ref_values[0::2]
v1=ref_values
v2=ref_values*(-1)
ref=np.hstack((v1,v2))
ref_com=np.array(list(itertools.product(ref,repeat=2)))
ref_symbol=ref_com[:,0]+1j*ref_com[:,1]
qam=ref_symbol[np.where(abs(ref_symbol)<7.07)]
else:
m=float(m)
ref_values=np.arange(1,np.sqrt(m))
ref_values=ref_values[0::2]
v1=ref_values
v2=ref_values*(-1)
ref=np.hstack((v1,v2))
ref_com=np.array(list(itertools.product(ref,repeat=2)))
ref_symbol=ref_com[:,0]+1j*ref_com[:,1]
qam=ref_symbol
return qam
def pam_ref_symbol(m,ini_phase):
ref_symbol=np.arange(-(m-1),m,2)*np.exp(1j*ini_phase)
return ref_symbol
def qam_mod(input_bits_array,m,type='binary'):
#m_array=[4.0,16.0,64.0]
m=float(m)
ref_symbol=qam_ref_symbol(m)
if type=='binary':
input_ints=bits_to_binary_to_int(input_bits_array,m)
elif type=='gray':
input_ints=bits_to_gray_to_int(input_bits_array,m)
else:
print 'error type: type must be "binary" or "gray"'
input_sym=ref_symbol[input_ints]
qam_symbol=input_sym
return qam_symbol
def mpsk_dem(received_symbols,m):
m=float(m)
mpsk_symbol=mpsk_ref_symbol(m)
mpsk_symbol=np.reshape(mpsk_symbol,(1,len(mpsk_symbol)))
repeat_mpsk=np.repeat(mpsk_symbol,len(received_symbols),axis=0)
reshape_received=np.reshape(received_symbols,(len(received_symbols),1))
repeat_received=np.repeat(reshape_received,mpsk_symbol.shape[1],axis=1)
distance=np.sqrt((np.real(repeat_received)-np.real(repeat_mpsk))**2+
(np.imag(repeat_received)-np.imag(repeat_mpsk))**2)
min_distance_index=np.argmin(distance,axis=1)
return min_distance_index
def qam_dem(received_symbols,m):
m=float(m)
qam_symbol=qam_ref_symbol(m)
qam_symbol=np.reshape(qam_symbol,(1,len(qam_symbol)))
repeat_qam=np.repeat(qam_symbol,len(received_symbols),axis=0)
reshape_received=np.reshape(received_symbols,(len(received_symbols),1))
repeat_received=np.repeat(reshape_received,qam_symbol.shape[1],axis=1)
distance=np.sqrt((np.real(repeat_received)-np.real(repeat_qam))**2+
(np.imag(repeat_received)-np.imag(repeat_qam))**2)
min_distance_index=np.argmin(distance,axis=1)
return min_distance_index
def pam_mod(input_bits,m,ini_phase,type='binary'):
m=float(m)
if type=='binary':
input_ints=bits_to_binary_to_int(input_bits,m)
elif type=='gray':
input_ints=bits_to_gray_to_int(input_bits,m)
else:
print 'error type: type must be "binary" or "gray"'
ref_symbol=np.arange(-(m-1),m,2)*np.exp(1j*ini_phase)
pam_symbol=ref_symbol[input_ints]
return pam_symbol
def pam_dem(received_symbols,m,ini_phase):
ref_symbol=np.arange(-(m-1),m,2)*np.exp(1j*ini_phase)
ref_symbol=np.reshape(ref_symbol,(1,len(ref_symbol)))
repeat_pam=np.repeat(ref_symbol,len(received_symbols),axis=0)
reshape_received=np.reshape(received_symbols,(len(received_symbols),1))
repeat_received=np.repeat(reshape_received,ref_symbol.shape[1],axis=1)
distance=np.sqrt((np.real(repeat_received)-np.real(repeat_pam))**2+
(np.imag(repeat_received)-np.imag(repeat_pam))**2)
min_distance_index=np.argmin(distance,axis=1)
received_ints=min_distance_index
return received_ints
def spatial_modulation_qam(input_bits,nt,m,type='None'):
k=np.log2(m)+np.log2(nt)
a=np.log2(m)
b=np.log2(nt)
nb=len(input_bits)
reshape_input_bits=np.transpose(np.reshape(input_bits,(nb/k,k)))
symbol_input_bits=reshape_input_bits[:a,:]
attenna_input_bits=reshape_input_bits[a:,:]
symbol_input_bits2=np.reshape(np.transpose(symbol_input_bits),(1,
symbol_input_bits.shape[0]*symbol_input_bits.shape[1])).ravel()
attenna_input_bits2=np.reshape(np.transpose(attenna_input_bits),(1,
attenna_input_bits.shape[0]*attenna_input_bits.shape[1])).ravel()
	if type=='None' or type=='binary':
symbol_input_int=bits_to_binary_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_binary_to_int(attenna_input_bits2,nt)
elif type=='gray':
symbol_input_int=bits_to_gray_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_gray_to_int(attenna_input_bits2,nt)
else:
print 'error type: type must be "binary" or "gray"'
norm_ref_symbol=qam_ref_symbol(m)
norm_input_symbol=norm_ref_symbol[symbol_input_int]
symbol_and_attenna=np.vstack((norm_input_symbol,attenna_input_int))
X=np.zeros((nt,symbol_and_attenna.shape[1]))*(1j)
for i in np.arange(symbol_and_attenna.shape[1]):
attenna_number=int(symbol_and_attenna[1,i])
X[attenna_number,i]=symbol_and_attenna[0,i]
sm_modulated_symbol=X
return sm_modulated_symbol
def sm_constellation(ref_symbol,nt):
all_symbol_position=np.zeros((nt,nt*len(ref_symbol)))*1j
for j in np.arange(len(ref_symbol)):
for i in np.arange(j*nt,(j+1)*nt):
all_symbol_position[i-j*nt,i]=ref_symbol[j]
return all_symbol_position
def generalized_spatial_modulation_qam(input_bits,nt,n_act,m,type='binary'):
nb_attenna_com=ncr(nt,n_act)
a=np.log2(m)
b=np.log2(2**np.floor(np.log2(nb_attenna_com)))
nb=len(input_bits)
k=float(a+b)
reshape_input_bits=np.transpose(np.reshape(input_bits,((nb/k),k)))
symbol_input_bits=reshape_input_bits[:a,:]
attenna_input_bits=reshape_input_bits[a:,:]
symbol_input_bits2=np.reshape(np.transpose(symbol_input_bits),(1,
symbol_input_bits.shape[0]*symbol_input_bits.shape[1])).ravel()
attenna_input_bits2=np.reshape(np.transpose(attenna_input_bits),(1,
attenna_input_bits.shape[0]*attenna_input_bits.shape[1])).ravel()
	if type=='None' or type=='binary':
symbol_input_int=bits_to_binary_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_binary_to_int(attenna_input_bits2,2**b)
elif type=='gray':
symbol_input_int=bits_to_gray_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_gray_to_int(attenna_input_bits2,2**b)
else:
print 'error type: type must be "binary" or "gray"'
norm_ref_symbol=qam_ref_symbol(m)
norm_input_symbol=norm_ref_symbol[symbol_input_int]
symbol_and_attenna=np.vstack((norm_input_symbol,attenna_input_int))
attenna_com=np.array(list(itertools.combinations(np.arange(nt),n_act)))
nb_com=np.reshape(np.arange(len(attenna_com)),(len(attenna_com),1))
nb_and_com=np.hstack((nb_com,attenna_com))
X=np.zeros((nt,symbol_and_attenna.shape[1]))*(1j)
for i in np.arange(symbol_and_attenna.shape[1]):
attenna_number=(nb_and_com[symbol_and_attenna[1,i],1:]).astype(int)
X[attenna_number,i]=symbol_and_attenna[0,i]
return X
def gsm_ref_symbol_combination(nt,n_act,ref_symbol):
attenna_combination=np.array(list(itertools.combinations(np.arange(nt),n_act)))
b=2**np.floor(np.log2(len(attenna_combination)))
attenna_combination=attenna_combination[:b,:]
symbol_combination=np.reshape(ref_symbol,(len(ref_symbol),1))
symbol_attenna_combination=np.array(list(itertools.product(symbol_combination,attenna_combination)))
look_up_table1=np.transpose(symbol_attenna_combination)
ref_symbol_combination=np.zeros((nt,look_up_table1.shape[1]))*1j
for i in np.arange(look_up_table1.shape[1]):
ref_symbol_combination[look_up_table1[1,i][0],i]=look_up_table1[0,i][0]
ref_symbol_combination[look_up_table1[1,i][1],i]=look_up_table1[0,i][0]
return ref_symbol_combination
def gsm_look_up_table(nt,n_act,ref_symbol):
b=2**np.floor(np.log2(ncr(nt,n_act)))
symbol_int_combination=np.arange(len(ref_symbol))
symbol_attenna_int_combination=np.array(list(itertools.product(symbol_int_combination,np.arange(b))))
return symbol_attenna_int_combination.astype(int)
def Ber(input_bits,cap_bits):
ber=np.sum(cap_bits!=input_bits)/float(len(input_bits))
return ber
def ncr(n,r):
import math
f=math.factorial
return f(n)/f(r)/f(n-r)
def mimo_look_up_table(nt,ref_symbol):
symbol_order=np.reshape(np.arange(len(ref_symbol)),(1,len(ref_symbol)))
row_1=np.repeat(symbol_order,4,axis=1)
attenna_order=np.reshape(np.arange(nt),(1,nt))
row_2=np.reshape(np.repeat(attenna_order,len(ref_symbol),axis=0),(1,nt*len(ref_symbol)))
look_up_table=np.vstack((row_1,row_2))
look_up_table=np.transpose(look_up_table)
return look_up_table
# input_bits=np.random.randint(2,size=300)
# pam_modulation=pam_mod(input_bits,8,np.pi/4.0,'binary')
# constellation(pam_modulation)
# dem_pam=pam_dem(pam_modulation,8,np.pi/4.0)
# input_ints=bits_to_binary_to_int(input_bits,8)
# ber=np.sum(input_ints!=dem_pam)
# print ber
# input_bits=np.random.randint(2,size=300)
# pam_modulation=pam_mod(input_bits,8,np.pi/4.0,'gray')
# constellation(pam_modulation)
# dem_pam=pam_dem(pam_modulation,8,np.pi/4.0)
# input_ints=bits_to_gray_to_int(input_bits,8)
# ber=np.sum(input_ints!=dem_pam)
# print ber
| mit | 2,672,909,887,420,412,000 | 33.567568 | 140 | 0.698898 | false |
ivanbgd/Genetic_Algorithm_01 | GA01v2.py | 1 | 7880 | # Python 2.7
"""
An Application of Genetic Algorithms
Task:
Inscribe a triangle of the maximum area in a given ellipse.
Ellipse is defined as: (x/a)^2 + (y/b)^2 = 1
"""
import math
import matplotlib.pyplot as plt
import numpy as np
import random
from timeit import default_timer as timer
tstart = timer()
# Definition of parameters of the GA
N = 100 # number of units (chromosomes)
L = 36 # length of a chromosome (12 bits per vertex)
pc = 0.8 # crossover probability
pm = 0.001 # mutation probability
G = 0.8 # generation gap
# Parameters of the ellipse
a = 5
b = 3
#################
# The algorithm #
#################
# Maximum number with L bits
maxnum = 2**L - 1
# We'll use these a lot
a2 = float(a * a)
b2 = float(b * b)
a2b2 = a2 * b2
Lthird = L//3
twoLthirds = 2 * Lthird
maxl3 = 2**Lthird - 1
piHalf = np.pi / 2
threePiHalf = 3. * piHalf
a2rec = 1. / a2
mask = long((1 << Lthird) - 1)
# The first generation - Array of N chromosomes, each consisting of L bits
newgen = [random.randint(0, maxnum) for i in range(N)]
oldgen = np.empty(N, dtype = np.uint64)
# Vertices of the triangles; a vertex is defined by its angle to the positive x-axis in radians
V1 = np.empty(N)
V2 = np.empty(N)
V3 = np.empty(N)
# Coordinates of the vertices
x1 = np.empty(N)
y1 = np.empty(N)
x2 = np.empty(N)
y2 = np.empty(N)
x3 = np.empty(N)
y3 = np.empty(N)
# Fitness function
f = np.empty(N)
# Array that holds the maximum value of fitness function in every generation
Fmax = []
# generation number counter
gencnt = 0
# condition for staying in the loop
cond = True
#The main loop
while cond:
# Evaluation of the newly formed generation
for i in range(N):
V1[i] = (float((newgen[i] >> twoLthirds) & mask) / maxl3) * 2.0 * np.pi
V2[i] = (float((newgen[i] >> Lthird) & mask) / maxl3) * 2.0 * np.pi
V3[i] = (float(newgen[i] & mask) / maxl3) * 2.0 * np.pi
# Coordinates of vertex V1
if (V1[i] < piHalf) or (V1[i] > threePiHalf):
x = math.sqrt(a2b2 / (a2 * math.tan(V1[i])**2 + b2))
else:
x = -math.sqrt(a2b2 / (a2 * math.tan(V1[i])**2 + b2))
y = x * math.tan(V1[i])
x1[i] = x
y1[i] = y
# Coordinates of vertex V2
if (V2[i] < piHalf) or (V2[i] > threePiHalf):
x = math.sqrt(a2b2 / (a2 * math.tan(V2[i])**2 + b2))
else:
x = -math.sqrt(a2b2 / (a2 * math.tan(V2[i])**2 + b2))
y = x * math.tan(V2[i])
x2[i] = x
y2[i] = y
# Coordinates of vertex V3
if (V3[i] < piHalf) or (V3[i] > threePiHalf):
x = math.sqrt(a2b2 / (a2 * math.tan(V3[i])**2 + b2))
else:
x = -math.sqrt(a2b2 / (a2 * math.tan(V3[i])**2 + b2))
y = x * math.tan(V3[i])
x3[i] = x
y3[i] = y
        # Lengths of the triangle's edges
la = math.sqrt((x2[i] - x1[i])**2 + (y2[i] - y1[i])**2)
lb = math.sqrt((x3[i] - x1[i])**2 + (y3[i] - y1[i])**2)
lc = math.sqrt((x3[i] - x2[i])**2 + (y3[i] - y2[i])**2)
# Semiperimeter of the triangle
s = (la + lb + lc) / 2.
# Fitness function (Heron's formula)
f[i] = math.sqrt(s * (s - la) * (s - lb) * (s - lc))
# The highest (best) value of f
maxf = np.amax(f)
# Index of the highest value of f
maxfindex = np.argmax(f)
Fmax.append(maxf)
# Plotting the result
plt.figure("An Application of Genetic Algorithms")
plt.hold(True)
plt.title("Generation number {}\nThe best result: {:.4f}".format(gencnt + 1, maxf))
plt.xlim(-a - 1, a + 1)
plt.ylim(-b - 1, b + 1)
# Drawing the ellipse
ellipse = np.array([[0.] * 361, [0.] * 361], dtype = float)
for i in range(361):
theta = 2.*np.pi*i/360.
if (theta <= piHalf) or (theta > threePiHalf):
x = math.fabs(math.sqrt(1./(a2rec + (math.tan(theta)**2)/b2)))
else:
x = -math.fabs(math.sqrt(1./(a2rec + (math.tan(theta)**2)/b2)))
y = x * math.tan(theta)
ellipse[0][i] = x
ellipse[1][i] = y
plt.plot(ellipse[0], ellipse[1], 'g', linewidth = 4.0) # thick green line
# Drawing the triangles that we got
for i in range(N):
if f[i] == maxf:
# The best chromosome - the triangle with the largest area
plt.plot([x1[i], x2[i], x3[i], x1[i]], [y1[i], y2[i], y3[i], y1[i]], 'r', linewidth = 4.0) # thick red line
else:
# The other chromosomes (triangles); they are all inscribed in the ellipse, but they don't have the largest area
plt.plot([x1[i], x2[i], x3[i], x1[i]], [y1[i], y2[i], y3[i], y1[i]], 'b', linewidth = 0.5) # thin blue line
# Hold the graph for a given amount of time in seconds
plt.pause(0.1)
plt.hold(False)
plt.plot()
### Natural selection by the roulette wheel method ###
oldgen = np.copy(newgen)
# Cumulative function
cumf = f.cumsum()
# We let the best chromosome pass to the next generation directly.
newgen[0] = oldgen[maxfindex]
    # We also let (1-G)*N-1 other randomly chosen chromosomes pass. The probability of their selection depends on f(i).
for i in range(1, int(round((1-G)*N))):
tmp = np.random.random() * cumf[-1]
firstPositive, firstPositiveIndex = np.amax(np.sign(cumf - tmp)), np.argmax(np.sign(cumf - tmp))
newgen[i] = oldgen[firstPositiveIndex]
### The rest of the new generation is formed by crossover (crossbreeding) ###
# There are two parents, and two offsprings
for i in range((N - int(round((1-G)*N)))//2):
tmp = np.random.random() * cumf[-1]
firstPositive, firstPositiveIndex = np.amax(np.sign(cumf - tmp)), np.argmax(np.sign(cumf - tmp))
parent1 = oldgen[firstPositiveIndex]
tmp = np.random.random() * cumf[-1]
firstPositive, firstPositiveIndex = np.amax(np.sign(cumf - tmp)), np.argmax(np.sign(cumf - tmp))
parent2 = oldgen[firstPositiveIndex]
if np.random.random() < pc:
# crossover
crossPoint = np.random.randint(1, L) # the crossover point can be after MSB and before LSB
maskLo = long((1 << (L - crossPoint)) - 1)
maskHi = maxnum & (~maskLo)
newgen[int(round((1-G)*N)) + 2*i] = (parent1 & maskHi) | (parent2 & maskLo) # offspring 1
newgen[int(round((1-G)*N)) + 2*i + 1] = (parent2 & maskHi) | (parent1 & maskLo) # offspring 2
else:
# no crossover
newgen[int(round((1-G)*N)) + 2*i] = parent1 # offspring 1
newgen[int(round((1-G)*N)) + 2*i + 1] = parent2 # offspring 2
### Mutation ###
for i in range(int(L * N * pm)):
chromosomeIndex = np.random.randint(N)
bitPosition = np.random.randint(L)
maskM = 1 << (L - bitPosition - 1)
newgen[chromosomeIndex] ^= maskM
gencnt += 1
    # Exit condition - We want the fitness values of the numchrom best chromosomes to lie within a given difference of each other.
numchrom = 10
difference = 0.001
f.sort()
if abs(f[-1] - f[-numchrom]) < difference:
cond = False
tend = timer()
print("The result is: {}".format(max(Fmax)))
print("The algorithm took {} generations, and {} seconds to complete.".format(gencnt, round(tend - tstart, 3)))
print("The maximum value of fitness function through generations:\n{}".format(Fmax))
plt.figure("The maximum value of fitness function through generations")
plt.title("The maximum value of fitness function through generations")
plt.xlim(0, gencnt - 1)
plt.plot(Fmax)
plt.show()
| mit | -7,571,025,908,308,426,000 | 30.970711 | 124 | 0.55736 | false |
ayy1337/CryptoPriceWatcher | grabtrex.py | 1 | 5552 | #!/usr/bin/python3
'''
Version: 1.0.02
Author: ayy1337
Licence: GNU GPL v3.0
'''
import sys
import time
import os
import datetime
import urllib.request
import collections
from operator import attrgetter
from operator import itemgetter
import shelve
from trexapi import Bittrex
condperc = .01
mins = 5
period = mins * 60
timebetweenticks = 2 #in seconds
minutesofdatatokeep = 30
cwd = os.getcwd()
if os.name in ['posix','linux']:
databasepath = cwd + "/db"
else:
databasepath = cwd + "\\db"
class minute:
def __init__(self, ticker, o, change, timestamp, volume, prevday):
self.ticker = ticker
self.change = float(change)
self.average = self.open = self.high = self.low = self.close = float(o)
self.volume = float(volume)
self.timestamp = int(timestamp)
self.numprices = 1
self.prevday = prevday
class coin:
def __init__(self, ticker):
self.ticker = ticker
self.minutes = collections.deque(maxlen = (int(minutesofdatatokeep) + 1))
def addminute(self,ticker, timestamp):
i = ticker
t = i['MarketName']
try:
price = float(i['Last'])
prevday = float(i['PrevDay'])
volume = float(i['BaseVolume'])
except:
price = 0
prevday = 0
volume = 0
try:
change = (price/prevday) -1
except:
change = 0
		self.minutes.append(minute(t,price,change,timestamp,volume, prevday)) #ticker, price, change, timestamp, volume, prevday
def updateminute(self,ticker,timestamp):
currmin = self.minutes[-1]
if (timestamp - currmin.timestamp) > 60:
self.addminute(ticker,timestamp)
else:
if ticker['Last'] == None:
print("New market added: {}".format(ticker["MarketName"]))
try:
last = float(ticker['Last'])
except:
last = 0
currmin.close = last
a = (currmin.average * currmin.numprices) + last
currmin.numprices += 1
currmin.average = a / currmin.numprices
if last > currmin.high:
currmin.high = last
if last < currmin.low:
currmin.low = last
try:
currmin.change = (currmin.close/currmin.prevday) -1
except:
currmin.change = 0
timestamp = int(time.time())
class updater:
def __init__(self):
self.coins = {}
try:
d = shelve.open(databasepath)
self.coins = d["trexcoins"]
d.close()
except:
pass
self.bittrex = Bittrex("","")
self.pcstatus = None
def update(self):
global timestamp
timestamp = int(time.time())
try:
self.coins = self.updatecoins(self.coins)
except:
return 1
gainers, losers = self.checkcond(self.coins)
try:
d = shelve.open(databasepath)
d['trexcoins'] = self.coins
d.close()
except:
pass
gainers = sorted(gainers, key=itemgetter(6,4))
losers = sorted(losers, key=itemgetter(6,4))
return gainers,losers
def updatecoins(self, coins):
data = self.bittrex.get_market_summaries()
if data['success'] == 1:
tickers = data['result']
else:
return
timestamp = int(time.time())
for item in tickers:
t = item['MarketName']
if item['MarketName'] not in coins:
coins[item['MarketName']] = coin(item['MarketName'])
if len(coins[t].minutes) > 0:
coins[t].updateminute(item,timestamp)
else:
coins[t].addminute(item, timestamp)
return coins
def checkcond(self, coins):
out = []
gainers = []
losers = []
for key in coins:
coin = coins[key]
mins = coin.minutes
tmp = []
endtime = mins[-1].timestamp
largestgain = 0
largestloss = 0
periodchange = 0
lowvol = ""
splt = key.split('-')
suffix = splt[0]
coinname = splt [1]
if suffix != "BTC":
continue
if 100 < mins[-1].volume < 500:
lowvol = 'l'
elif mins[-1].volume <= 100:
lowvol = 'v'
for i in range(1,len(mins)):
tick = mins[-i]
if (endtime - tick.timestamp) <= period:
tmp.append(tick) #tmp[0] is most recent minute, tmp[-1] is oldest/least-recent minute
else:
break
for i in range(1,len(tmp)+1):
for n in range(i+1, len(tmp)+1):
root = tmp[-i]
end = tmp[-n]
try:
changeup = (end.high - root.low) / root.low
except:
changeup = 0
if changeup > largestgain:
largestgain = changeup
try:
changedown = (end.low-root.high)/root.high
except:
changedown = 0
if changedown < largestloss:
largestloss = changedown
if(len(tmp) > 0):
try:
periodchange = ((mins[-1].close-mins[-0].average) / mins[0].average) * 100
except:
periodchange = 0
else:
continue
if (largestgain >= condperc) or (periodchange > 2):
gainers.append([coinname,largestgain*100,mins[-1].close,suffix, periodchange, int(mins[-1].change * 100), lowvol])
if ((largestloss*-1) >= condperc) or (periodchange < -2):
losers.append([coinname, largestloss * 100, mins[-1].close, suffix, periodchange, int(mins[-1].change * 100), lowvol])
return gainers, losers
def getfav(self, ticker):
splt = ticker.split('-')
c = self.coins[ticker]
mins = c.minutes
oldprice = mins[-(min(len(mins),5))].open
currprice = mins[-1].close
fiveminchange = ((currprice/oldprice)-1) * 100
oldprice = mins[-(min(len(mins),30))].open
thirtyminchange = ((currprice/oldprice)-1)*100
price = currprice
volume = mins[-1].volume
if volume > 500:
v = ' '
elif volume >100:
v = 'l'
else:
v = 'v'
tfhourchange = mins[-1].change * 100
return [splt[1]+'(b)', fiveminchange, price, thirtyminchange, tfhourchange, v]
def getlast(self, ticker):
return self.coins[ticker].minutes[-1].close
if __name__ == "__main__":
a = updater()
while 1:
a.update()
time.sleep(2) | gpl-3.0 | -6,008,384,891,887,564,000 | 22.629787 | 122 | 0.634726 | false |
noinil/euler_pysolution | python_solutions/p098_Anagramic_squares.py | 1 | 1634 | #!/usr/bin/env python3
from itertools import permutations as pm
def is_sqr(n):
i = round(n**0.5)
if i ** 2 == n:
return True
else:
return False
def main():
words = []
with open("../data/p98_words.txt", "r") as fin:
for lines in fin:
for i in lines.split(','):
if i != i[::-1]:
words.append(i[1:-1])
vals = []
anagramic_pairs = []
c_vals = []
for i in words:
t = 0
for c in i[:]:
t += 10**(ord(c) - ord('A'))
if t in vals:
if t in c_vals:
anagramic_pairs.append((words[vals.index(t, vals.index(t)+1)], i))
c_vals.append(t)
anagramic_pairs.append((words[vals.index(t)], i))
vals.append(t)
sqr_list = [i**2 for i in range(0, 4*10**4) if i**2 < 10**9]
digi_list = [i for i in range(0, 10)]
for i in anagramic_pairs:
worda, wordb = i[0], i[1]
chl = []
for c in worda:
if c not in chl:
chl.append(c)
n = len(chl)
print(worda, wordb, n)
pmiter = pm(digi_list, n)
for j in pmiter:
wa, wb = worda, wordb
for k in range(0, n):
wa = wa.replace(chl[k], str(j[k]))
wb = wb.replace(chl[k], str(j[k]))
if wa[0] == '0' or wb[0] == '0':
continue
va, vb = int(wa), int(wb)
# if va in sqr_list and vb in sqr_list:
if is_sqr(va) and is_sqr(vb):
print(worda, wordb, va, vb)
if __name__ == '__main__':
main()
| gpl-2.0 | -1,285,471,491,725,333,200 | 26.694915 | 82 | 0.442472 | false |
nofdev/fastforward | fastforward/horizon.py | 1 | 1829 | import sys
from fastforward.cliutil import priority
from playback.api import Horizon
def install(args):
try:
target = Horizon(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
except AttributeError:
        sys.stderr.write('No hosts found. Please use the --hosts param.')
sys.exit(1)
target.install(
args.openstack_host,
args.memcached_servers,
args.time_zone)
@priority(20)
def make(parser):
"""provison Horizon with HA"""
s = parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
def install_f(args):
install(args)
install_parser = s.add_parser('install', help='install horizon')
install_parser.add_argument('--openstack-host',
help='configure the dashboard to use OpenStack services on the controller node e.g. CONTROLLER_VIP',
action='store',
default=None,
dest='openstack_host')
install_parser.add_argument('--memcached-servers',
help='django memcache e.g. CONTROLLER1:11211,CONTROLLER2:11211',
action='store',
default=None,
dest='memcached_servers')
install_parser.add_argument('--time-zone',
help='the timezone of the server. This should correspond with the timezone of your entire OpenStack installation e.g. Asia/Shanghai',
action='store',
default=None,
dest='time_zone')
install_parser.set_defaults(func=install_f)
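# Illustrative invocation of the subcommand defined above (a sketch only: the
# exact top-level entry point depends on fastforward's CLI wiring, and the
# user/host values here are placeholders):
#   horizon install --user ubuntu --hosts controller1,controller2 \
#       --openstack-host CONTROLLER_VIP \
#       --memcached-servers CONTROLLER1:11211,CONTROLLER2:11211 \
#       --time-zone Asia/Shanghai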
| mit | 66,383,570,392,045,620 | 40.568182 | 165 | 0.545653 | false |
yausern/stlab | devices/Cryocon_44C.py | 1 | 3712 | import visa
import numpy as np
import time
from stlab.devices.instrument import instrument
class Cryocon_44C(instrument):
def __init__(self,addr='TCPIP::192.168.1.5::5000::SOCKET',reset=True,verb=True,**kwargs):
#RST reboots the instrument. Avoid... Also needs special read_termination = '\r\n'
if 'read_termination' not in kwargs:
kwargs['read_termination'] = '\r\n'
super().__init__(addr,reset=False,verb=verb,**kwargs)
self.id()
self.channellist=['A','B','C','D']
if reset:
for channel in self.channellist: #set all units to K
self.write('INP ' + channel + ':UNIT K')
def write(self,mystr): #REQUIRES SPECIAL WRITE WITH OPC CHECK...
self.query(mystr + ';*OPC?')
def GetTemperature(self,channel='C'):
mystr = 'INP? ' + channel
curr = self.query(mystr)
try:
curr = float(curr)
except ValueError:
print('Channel ',channel,' out of range')
curr = -20.
return curr
def GetTemperatureAll(self):
result = []
for chan in self.channellist:
result.append(self.GetTemperature(chan))
return result
def SetSetPoint(self,setp,loop=2):
mystr = 'LOOP ' + str(loop) + ':SETP ' + str(setp)
self.write(mystr)
def GetSetPoint(self,loop=2):
mystr = 'LOOP ' + str(loop) + ':SETP?'
setp = self.query(mystr)
channel = self.query('LOOP '+ str(loop) +':SOUR?')
unit = self.query('INP ' + str(channel) + ':UNIT?')
print(setp)
return float(setp.strip(unit))
def SetPman(self,setp,loop=2):
mystr = 'LOOP ' + str(loop) + ':PMAN ' + str(setp)
self.write(mystr)
def GetPman(self,loop=2):
mystr = 'LOOP ' + str(loop) + ':PMAN?'
setp = self.query(mystr)
return float(setp)
def ControlOn(self):
self.write('CONT')
return
def ControlOff(self):
self.write('STOP')
return
def SetLoopMode(self,loop,mode): #OFF, PID, MAN, TABLE, RAMPP
self.write('LOOP ' + str(loop) + ':TYPE ' + str(mode))
return
def WaitForTStable(self,loop=2,tol=0.05,timeout=300.,tsettle=40.):
channel = self.query('LOOP ' + str(loop) + ':SOUR?') #Get channel on chosen loop
channel = channel.strip('CH')
Tset = self.GetSetPoint(loop)
t0 = time.time()
tnow = time.time()
tstablestart = None
success = False
while tnow-t0 < timeout:
tnow = time.time()
TT = self.GetTemperature(channel) #Get current temperature
if abs(TT-Tset)<tol:
if tstablestart == None:
tstablestart = tnow
print('T in tolerance. Settling...')
elif abs(TT-Tset)>=tol:
if tstablestart != None:
print('T left tolerance')
tstablestart = None
continue
if tnow-tstablestart > tsettle:
success = True
break
time.sleep(0.2)
if success:
print("Channel " + channel + " STABLE at " + str(Tset) + ' K')
else:
print("Channel " + channel + " UNSTABLE for " + str(Tset) + ' K')
return success
def GetMetadataString(self): #Should return a string of metadata adequate to write to a file
pass
| gpl-3.0 | 2,980,873,264,327,063,000 | 36.494949 | 96 | 0.545797 | false |
luozhaoyu/leetcode | valid_number.py | 1 | 2387 | # -*- coding: utf-8 -*-
"""
valid_number.py
~~~~~~~~~~~~~~
Determine whether a string represents a valid number (sign, decimal point,
and scientific notation are handled).
"""
import re
class Solution:
# @param s, a string
# @return a boolean
def isNumber(self, s):
s = s.strip().lower()
if len(s) == 0:
return False
if s.count('.') > 1 or s.count('e') > 1:
return False
if s.startswith("-") or s.startswith("+"):
s = s[1:]
if s.isdigit():
return True
elif s.find("e") >= 0:
front, back = s.split('e')
#print front, back
if self.isDecimal(front) and self.isIntegerWithFrontZero(back):
return True
else:
return False
elif self.isZero(s):
return True
else:
return self.isDecimal(s)
def isZero(self, s):
if re.search(r"\.[0]+", s) or re.search(r"[0]\.", s) or s == "0":
return True
else:
return False
def isIntegerWithFrontZero(self, s):
if s.startswith('-') or s.startswith('+'):
s = s[1:]
if re.search(r"^\d+$", s):
return True
else:
return False
def isDecimal(self, s):
if s.startswith('-') or s.startswith('+'):
s = s[1:]
if re.search(r"^[0]{0,1}\.\d*[1-9]+\d*$", s):
return True
elif re.search(r"^[1-9]\d*\.{0,1}\d*$", s):
return True
else:
return False
def _main(argv):
s = Solution()
print s.isNumber("3")
print s.isNumber("0.1")
print s.isNumber(".1")
print s.isNumber(" 0.1")
print s.isNumber("2e10")
print -1, s.isNumber("-1")
print "+1.0", s.isNumber("+1.0")
print s.isNumber("46.0e7")
print s.isNumber("46.e7")
print s.isNumber("3.")
print s.isNumber(".2e7")
print s.isNumber(".0")
print s.isNumber(".00")
print s.isNumber("01.")
print s.isNumber("3")
print s.isNumber("1 a")
print s.isNumber("abc")
print s.isNumber("..2")
print s.isNumber("3..2")
print s.isNumber("")
print s.isNumber(".")
print s.isNumber(". 0e7")
print s.isNumber(".0e7")
print s.isNumber(".e7")
print s.isNumber("e7")
print s.isNumber("ee")
print s.isNumber("0..")
if __name__ == '__main__':
import sys
_main(sys.argv)
| mit | -6,678,119,990,164,091,000 | 22.87 | 75 | 0.485547 | false |
Orav/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/vcs/git.py | 1 | 8092 | import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
bundle_file = 'git-clone.txt'
guide = ('# This was a Git repo; to make it a repo again run:\n'
'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n')
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
super(Git, self).__init__(url, *args, **kwargs)
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
if url_match:
url = url_match.group(1).strip()
rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
if rev_match:
rev = rev_match.group(1).strip()
if url and rev:
return url, rev
return None, None
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
call_subprocess(
[self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
return rev_options
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
        # Then reset to the wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
call_subprocess([self.cmd, 'clone', '-q', url, dest])
#: repo may contain submodules
self.update_submodules(dest)
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.get_revision(dest).startswith(rev_options[0]):
call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = call_subprocess(
[self.cmd, 'config', 'remote.origin.url'],
show_stdout=False, cwd=location)
return url.strip()
def get_revision(self, location):
current_rev = call_subprocess(
[self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
output = call_subprocess([self.cmd, 'show-ref'],
show_stdout=False, cwd=location)
rv = {}
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
ref = ref.strip()
ref_name = None
if ref.startswith('refs/remotes/'):
ref_name = ref[len('refs/remotes/'):]
elif ref.startswith('refs/heads/'):
ref_name = ref[len('refs/heads/'):]
elif ref.startswith('refs/tags/'):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit.strip()
return rv
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
refs = self.get_refs(location)
# refs maps names to commit hashes; we need the inverse
# if multiple names map to a single commit, this arbitrarily picks one
names_by_commit = dict((commit, ref) for ref, commit in refs.items())
if current_rev in names_by_commit:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH, they sometimes don't
        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
if not '://' in self.url:
assert not 'file:' in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
cwd=location)
vcs.register(Git)
| lgpl-3.0 | 4,057,708,579,213,142,000 | 39.71134 | 111 | 0.547578 | false |
pataquets/namecoin-core | test/functional/wallet_importdescriptors.py | 1 | 26430 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importdescriptors RPC.
Test importdescriptors by generating keys on node0, importing the corresponding
descriptors on node1 and then testing the address info for the different address
variants.
- `get_generate_key()` is called to generate keys and return the privkeys,
pubkeys and all variants of scriptPubKey and address.
- `test_importdesc()` is called to send an importdescriptors call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
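# A typical request passed to test_importdesc() below looks like this
# (illustrative values only; descsum_create and the generated key come from
# the helpers imported below):
#   {"desc": descsum_create("pkh(" + key.pubkey + ")"),
#    "timestamp": "now",
#    "label": "Descriptor import test"}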
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
from test_framework.wallet_util import (
get_generate_key,
test_address,
)
class ImportDescriptorsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"],
["-addresstype=bech32", "-keypool=5"]
]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def test_importdesc(self, req, success, error_code=None, error_message=None, warnings=None, wallet=None):
"""Run importdescriptors and assert success"""
if warnings is None:
warnings = []
wrpc = self.nodes[1].get_wallet_rpc('w1')
if wallet is not None:
wrpc = wallet
result = wrpc.importdescriptors([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info('Setting up wallets')
self.nodes[0].createwallet(wallet_name='w0', disable_private_keys=False)
w0 = self.nodes[0].get_wallet_rpc('w0')
self.nodes[1].createwallet(wallet_name='w1', disable_private_keys=True, blank=True, descriptors=True)
w1 = self.nodes[1].get_wallet_rpc('w1')
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.nodes[1].createwallet(wallet_name="wpriv", disable_private_keys=False, blank=True, descriptors=True)
wpriv = self.nodes[1].get_wallet_rpc("wpriv")
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
self.log.info('Mining coins')
w0.generatetoaddress(101, w0.getnewaddress())
# RPC importdescriptors -----------------------------------------------
# # Test import fails if no descriptor present
key = get_generate_key()
self.log.info("Import should fail if a descriptor is not provided")
self.test_importdesc({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor not found.')
# # Test importing of a P2PKH descriptor
key = get_generate_key()
self.log.info("Should import a p2pkh descriptor")
self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": "Descriptor import test"},
success=True)
test_address(w1,
key.p2pkh_addr,
solvable=True,
ismine=True,
labels=["Descriptor import test"])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.log.info("Internal addresses cannot have labels")
self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"internal": True,
"label": "Descriptor import test"},
success=False,
error_code=-8,
error_message="Internal addresses should not have a label")
# # Test importing of a P2SH-P2WPKH descriptor
key = get_generate_key()
self.log.info("Should not import a p2sh-p2wpkh descriptor without checksum")
self.test_importdesc({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now"
},
success=False,
error_code=-5,
error_message="Missing checksum")
self.log.info("Should not import a p2sh-p2wpkh descriptor that has range specified")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"range": 1,
},
success=False,
error_code=-8,
error_message="Range should not be specified for an un-ranged descriptor")
self.log.info("Should not import a p2sh-p2wpkh descriptor and have it set to active")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": True,
},
success=False,
error_code=-8,
error_message="Active descriptors must be ranged")
self.log.info("Should import a (non-active) p2sh-p2wpkh descriptor")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": False,
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# Check persistence of data and that loading works correctly
w1.unloadwallet()
self.nodes[1].loadwallet('w1')
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# # Test importing of a multisig descriptor
key1 = get_generate_key()
key2 = get_generate_key()
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importdesc({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True)
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(w1,
key1.p2pkh_addr,
ismine=False)
# # Test ranged descriptors
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
xpub = "tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
addresses += ["ncrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju76qa6px", "ncrt1qfqeppuvj0ww98r6qghmdkj70tv8qpcheap27pj"] # wpkh subscripts corresponding to the above addresses
desc = "sh(wpkh(" + xpub + "/0/0/*" + "))"
self.log.info("Ranged descriptors cannot have labels")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100],
"label": "test"},
success=False,
error_code=-8,
error_message='Ranged descriptors should not have a label')
self.log.info("Private keys required for private keys enabled wallet")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100]},
success=False,
error_code=-4,
error_message='Cannot import descriptor without private keys to a wallet with private keys enabled',
wallet=wpriv)
self.log.info("Ranged descriptor import should warn without a specified range")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
warnings=['Range not given, using default keypool range'])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
# # Test importing of a ranged descriptor with xpriv
self.log.info("Should not import a ranged descriptor that includes xpriv into a watch-only wallet")
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=False,
error_code=-4,
error_message='Cannot import private keys to a wallet with private keys disabled')
for address in addresses:
test_address(w1,
address,
ismine=False,
solvable=False)
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Make sure ranged imports import keys in order
w1 = self.nodes[1].get_wallet_rpc('w1')
self.log.info('Key ranges should be imported in order')
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'ncrt1qtmp74ayg7p24uslctssvjm06q5phz4yrvy646e', # m/0'/0'/0
'ncrt1q8vprchan07gzagd5e6v9wd7azyucksq2vqu8lj', # m/0'/0'/1
'ncrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjtaapkf', # m/0'/0'/2
'ncrt1qau64272ymawq26t90md6an0ps99qkrse7le8u6', # m/0'/0'/3
'ncrt1qsg97266hrh6cpmutqen8s4s962aryy77ced5p6', # m/0'/0'/4
]
self.test_importdesc({'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('sh(wpkh([abcdef12/0h/0h]' + xpub + '/*))'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('pkh([12345678/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
for i, expected_addr in enumerate(addresses):
received_addr = w1.getnewaddress('', 'bech32')
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'bech32')
assert_equal(received_addr, expected_addr)
bech32_addr_info = w1.getaddressinfo(received_addr)
assert_equal(bech32_addr_info['desc'][:23], 'wpkh([80002067/0\'/0\'/{}]'.format(i))
shwpkh_addr = w1.getnewaddress('', 'p2sh-segwit')
shwpkh_addr_info = w1.getaddressinfo(shwpkh_addr)
assert_equal(shwpkh_addr_info['desc'][:26], 'sh(wpkh([abcdef12/0\'/0\'/{}]'.format(i))
pkh_addr = w1.getnewaddress('', 'legacy')
pkh_addr_info = w1.getaddressinfo(pkh_addr)
assert_equal(pkh_addr_info['desc'][:22], 'pkh([12345678/0\'/0\'/{}]'.format(i))
assert_equal(w1.getwalletinfo()['keypoolsize'], 4 * 3) # After retrieving a key, we don't refill the keypool again, so it's one less for each address type
w1.keypoolrefill()
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
# Check active=False default
self.log.info('Check imported descriptors are not active by default')
self.test_importdesc({'desc': descsum_create('pkh([12345678/0h/0h]' + xpub + '/*)'),
'range' : [0, 2],
'timestamp': 'now',
'internal': True
},
success=True)
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
# # Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
wallet=wpriv)
test_address(wpriv,
address,
solvable=True,
ismine=True)
txid = w0.sendtoaddress(address, 49.99995540)
w0.generatetoaddress(6, w0.getnewaddress())
self.sync_blocks()
tx = wpriv.createrawtransaction([{"txid": txid, "vout": 0}], {w0.getnewaddress(): 49.999})
signed_tx = wpriv.signrawtransactionwithwallet(tx)
w1.sendrawtransaction(signed_tx['hex'])
# Make sure that we can use import and use multisig as addresses
self.log.info('Test that multisigs can be imported, signed for, and getnewaddress\'d')
self.nodes[1].createwallet(wallet_name="wmulti_priv", disable_private_keys=False, blank=True, descriptors=True)
wmulti_priv = self.nodes[1].get_wallet_rpc("wmulti_priv")
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 0)
self.test_importdesc({"desc":"wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/0h/0h/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/0h/0h/*,tprv8ZgxMBicQKsPeonDt8Ka2mrQmHa61hQ5FQCsvWBTpSNzBFgM58cV2EuXNAHF14VawVpznnme3SuTbA62sGriwWyKifJmXntfNeK7zeqMCj1/84h/0h/0h/*))#m2sr93jn",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
self.test_importdesc({"desc":"wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/1h/0h/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/1h/0h/*,tprv8ZgxMBicQKsPeonDt8Ka2mrQmHa61hQ5FQCsvWBTpSNzBFgM58cV2EuXNAHF14VawVpznnme3SuTbA62sGriwWyKifJmXntfNeK7zeqMCj1/84h/1h/0h/*))#q3sztvx5",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1001) # Range end (1000) is inclusive, so 1001 addresses generated
addr = wmulti_priv.getnewaddress('', 'bech32')
assert_equal(addr, 'ncrt1qdt0qy5p7dzhxzmegnn4ulzhard33s2809arjqgjndx87rv5vd0fqhrnwwh') # Derived at m/84'/0'/0'/0
change_addr = wmulti_priv.getrawchangeaddress('bech32')
assert_equal(change_addr, 'ncrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsewy2df')
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1000)
txid = w0.sendtoaddress(addr, 10)
self.nodes[0].generate(6)
self.sync_all()
send_txid = wmulti_priv.sendtoaddress(w0.getnewaddress(), 8)
decoded = wmulti_priv.decoderawtransaction(wmulti_priv.gettransaction(send_txid)['hex'])
assert_equal(len(decoded['vin'][0]['txinwitness']), 4)
self.nodes[0].generate(6)
self.sync_all()
self.nodes[1].createwallet(wallet_name="wmulti_pub", disable_private_keys=True, blank=True, descriptors=True)
wmulti_pub = self.nodes[1].get_wallet_rpc("wmulti_pub")
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 0)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/0h/0h]tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*,[59b09cd6/84h/0h/0h]tpubDDBF2BTR6s8drwrfDei8WxtckGuSm1cyoKxYY1QaKSBFbHBYQArWhHPA6eJrzZej6nfHGLSURYSLHr7GuYch8aY5n61tGqgn8b4cXrMuoPH/*,[e81a0532/84h/0h/0h]tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E/*))#tsry0s5e",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/1h/0h]tpubDCXqdwWZcszwqYJSnZp8eARkxGJfHAk23KDxbztV4BbschfaTfYLTcSkSJ3TN64dRqwa1rnFUScsYormKkGqNbbPwkorQimVevXjxzUV9Gf/*,[59b09cd6/84h/1h/0h]tpubDCYfZY2ceyHzYzMMVPt9MNeiqtQ2T7Uyp9QSFwYXh8Vi9iJFYXcuphJaGXfF3jUQJi5Y3GMNXvM11gaL4txzZgNGK22BFAwMXynnzv4z2Jh/*,[e81a0532/84h/1h/0h]tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb/*))#c08a2rzv",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 1000) # The first one was already consumed by previous import and is detected as used
addr = wmulti_pub.getnewaddress('', 'bech32')
assert_equal(addr, 'ncrt1qp8s25ckjl7gr6x2q3dx3tn2pytwp05upkjztk6ey857tt50r5aeqwp24f4') # Derived at m/84'/0'/0'/1
change_addr = wmulti_pub.getrawchangeaddress('bech32')
assert_equal(change_addr, 'ncrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsewy2df')
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 999)
txid = w0.sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
self.nodes[0].generate(6)
self.sync_all()
assert_equal(wmulti_pub.getbalance(), wmulti_priv.getbalance())
# Make sure that descriptor wallets containing multiple xpubs in a single descriptor load correctly
wmulti_pub.unloadwallet()
self.nodes[1].loadwallet('wmulti_pub')
self.log.info("Multisig with distributed keys")
self.nodes[1].createwallet(wallet_name="wmulti_priv1", descriptors=True)
wmulti_priv1 = self.nodes[1].get_wallet_rpc("wmulti_priv1")
res = wmulti_priv1.importdescriptors([
{
"desc": descsum_create("wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/0h/0h/*,[59b09cd6/84h/0h/0h]tpubDDBF2BTR6s8drwrfDei8WxtckGuSm1cyoKxYY1QaKSBFbHBYQArWhHPA6eJrzZej6nfHGLSURYSLHr7GuYch8aY5n61tGqgn8b4cXrMuoPH/*,[e81a0532/84h/0h/0h]tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/1h/0h/*,[59b09cd6/84h/1h/0h]tpubDCYfZY2ceyHzYzMMVPt9MNeiqtQ2T7Uyp9QSFwYXh8Vi9iJFYXcuphJaGXfF3jUQJi5Y3GMNXvM11gaL4txzZgNGK22BFAwMXynnzv4z2Jh/*,[e81a0532/84h/1h/0h]tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
self.nodes[1].createwallet(wallet_name='wmulti_priv2', blank=True, descriptors=True)
wmulti_priv2 = self.nodes[1].get_wallet_rpc('wmulti_priv2')
res = wmulti_priv2.importdescriptors([
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/0h/0h]tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/0h/0h/*,[e81a0532/84h/0h/0h]tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/1h/0h]tpubDCXqdwWZcszwqYJSnZp8eARkxGJfHAk23KDxbztV4BbschfaTfYLTcSkSJ3TN64dRqwa1rnFUScsYormKkGqNbbPwkorQimVevXjxzUV9Gf/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/1h/0h/*,[e81a0532/84h/1h/0h]tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
rawtx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 9.999})
tx_signed_1 = wmulti_priv1.signrawtransactionwithwallet(rawtx)
assert_equal(tx_signed_1['complete'], False)
tx_signed_2 = wmulti_priv2.signrawtransactionwithwallet(tx_signed_1['hex'])
assert_equal(tx_signed_2['complete'], True)
self.nodes[1].sendrawtransaction(tx_signed_2['hex'])
self.log.info("Combo descriptors cannot be active")
self.test_importdesc({"desc": descsum_create("combo(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=False,
error_code=-4,
error_message="Combo descriptors cannot be set to active")
self.log.info("Descriptors with no type cannot be active")
self.test_importdesc({"desc": descsum_create("pk(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=True,
warnings=["Unknown output type, cannot set descriptor to active."])
if __name__ == '__main__':
ImportDescriptorsTest().main()
| mit | 4,546,330,020,616,663,000 | 56.707424 | 464 | 0.59319 | false |
Shiva-Iyer/kepler | pykepler/riseset.py | 1 | 3379 | # riseset.py - Wrapper for celestial body rise/transit/set times
# Copyright (C) 2016 Shiva Iyer <shiva.iyer AT g m a i l DOT c o m>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
if __name__ == "__main__":
exit()
from ctypes import *
from pykepler import _libkepler
def riseset(df, ra, dec, gast, lon, lat, delt, h0):
"""Calculate rise/transit/set times for an object given its
positions in equatorial coordinates.
df -- List of day fractions, each in the range [0,1]. Positions
must be given for one full day at intervals of 6 hours or
smaller for satisfactory results, especially for the Moon
and Mercury.
ra -- RA of the object in radians at the times in <df>.
dec -- Declination of the object in radians at the times in <df>.
gast -- Greenwich apparent sidereal time in radians at <df[0]>.
lon -- Observer's longitude in radians, positive east of Greenwich.
lat -- Observer's latitude in radians, positive north of the equator.
delt -- Delta-T in seconds at <df[0]>.
h0 -- Correction to use for atmospheric refraction in radians.
Return: rts[0] = rise, rts[1] = transit, rts[2] = setting times,
all in UTC day fractions in the range [0,1]. Values will be -1
for objects that don't rise/transit/set.
"""
N = len(df)
rts = (c_double*3)()
_libkepler.riseset(c_int(N),
cast((c_double*N)(*df), POINTER(c_double)),
cast((c_double*N)(*ra), POINTER(c_double)),
cast((c_double*N)(*dec), POINTER(c_double)),
c_double(gast),
c_double(lon),
c_double(lat),
c_double(delt),
c_double(h0),
pointer(rts))
return(rts[0], rts[1], rts[2])
def interpolate(X, Y, xint):
"""Interpolate using Lagrange's interpolation formula.
X -- x-values for interpolation.
Y -- y-values for interpolation.
xint -- Interpolant.
Return: Interpolated y-value corresponding to <xint>.
"""
N = len(X)
return(_libkepler.interpolate(c_int(N),
cast((c_double*N)(*X), POINTER(c_double)),
cast((c_double*N)(*Y), POINTER(c_double)),
c_double(xint)))
_libkepler.riseset.argtypes = [
c_int,
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
c_double,
c_double,
c_double,
c_double,
c_double,
POINTER(c_double*3)
]
_libkepler.interpolate.restype = c_double
_libkepler.interpolate.argtypes = [
c_int,
POINTER(c_double),
POINTER(c_double),
c_double
]
__all__ = [
"riseset",
"interpolate"
]
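# Minimal usage sketch (illustrative only; assumes the libkepler shared library
# was loaded successfully when the pykepler package was imported):
#
#     from pykepler.riseset import interpolate
#     # Lagrange interpolation through (0,0), (1,1), (2,4), evaluated at x = 1.5
#     y = interpolate([0.0, 1.0, 2.0], [0.0, 1.0, 4.0], 1.5)   # 2.25 for y = x^2
#
# riseset() is called the same way, but needs real ephemeris positions (ra/dec
# sampled over one day), Greenwich apparent sidereal time and observer details.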
| gpl-3.0 | -8,199,793,369,504,019,000 | 32.79 | 76 | 0.606096 | false |
tylertian/Openstack | openstack F/glance/glance/registry/api/v1/members.py | 1 | 13468 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.openstack.common.log as logging
LOG = logging.getLogger(__name__)
class Controller(object):
def _check_can_access_image_members(self, context):
if context.owner is None and not context.is_admin:
raise webob.exc.HTTPUnauthorized(_("No authenticated user"))
def __init__(self):
self.db_api = glance.db.get_api()
self.db_api.configure_db()
def index(self, req, image_id):
"""
Get the members of an image.
"""
try:
self.db_api.image_get(req.context, image_id)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
members = self.db_api.image_member_find(req.context, image_id=image_id)
msg = _("Returning member list for image %(id)s")
LOG.info(msg % {'id': image_id})
return dict(members=make_member_list(members,
member_id='member',
can_share='can_share'))
@utils.mutating
def update_all(self, req, image_id, body):
"""
Replaces the members of the image with those specified in the
body. The body is a dict with the following format::
{"memberships": [
{"member_id": <MEMBER_ID>,
["can_share": [True|False]]}, ...
]}
"""
self._check_can_access_image_members(req.context)
# Make sure the image exists
session = self.db_api.get_session()
try:
image = self.db_api.image_get(req.context, image_id,
session=session)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
# Can they manipulate the membership?
if not self.db_api.is_image_sharable(req.context, image):
msg = _("User lacks permission to share image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("No permission to share that image")
raise webob.exc.HTTPForbidden(msg)
# Get the membership list
try:
memb_list = body['memberships']
except Exception, e:
# Malformed entity...
msg = _("Invalid membership association specified for "
"image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("Invalid membership association: %s") % e
raise webob.exc.HTTPBadRequest(explanation=msg)
add = []
existing = {}
# Walk through the incoming memberships
for memb in memb_list:
try:
datum = dict(image_id=image['id'],
member=memb['member_id'],
can_share=None)
except Exception, e:
# Malformed entity...
msg = _("Invalid membership association specified for "
"image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("Invalid membership association: %s") % e
raise webob.exc.HTTPBadRequest(explanation=msg)
# Figure out what can_share should be
if 'can_share' in memb:
datum['can_share'] = bool(memb['can_share'])
# Try to find the corresponding membership
members = self.db_api.image_member_find(req.context,
image_id=datum['image_id'],
member=datum['member'],
session=session)
try:
member = members[0]
except IndexError:
# Default can_share
datum['can_share'] = bool(datum['can_share'])
add.append(datum)
else:
# Are we overriding can_share?
if datum['can_share'] is None:
datum['can_share'] = members[0]['can_share']
existing[member['id']] = {
'values': datum,
'membership': member,
}
# We now have a filtered list of memberships to add and
# memberships to modify. Let's start by walking through all
# the existing image memberships...
existing_members = self.db_api.image_member_find(req.context,
image_id=image['id'])
for memb in existing_members:
if memb['id'] in existing:
# Just update the membership in place
update = existing[memb['id']]['values']
self.db_api.image_member_update(req.context, memb, update,
session=session)
else:
# Outdated one; needs to be deleted
self.db_api.image_member_delete(req.context, memb,
session=session)
# Now add the non-existent ones
for memb in add:
self.db_api.image_member_create(req.context, memb, session=session)
# Make an appropriate result
msg = _("Successfully updated memberships for image %(id)s")
LOG.info(msg % {'id': image_id})
return webob.exc.HTTPNoContent()
@utils.mutating
def update(self, req, image_id, id, body=None):
"""
Adds a membership to the image, or updates an existing one.
If a body is present, it is a dict with the following format::
{"member": {
"can_share": [True|False]
}}
If "can_share" is provided, the member's ability to share is
set accordingly. If it is not provided, existing memberships
remain unchanged and new memberships default to False.
"""
self._check_can_access_image_members(req.context)
# Make sure the image exists
try:
image = self.db_api.image_get(req.context, image_id)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
# Can they manipulate the membership?
if not self.db_api.is_image_sharable(req.context, image):
msg = _("User lacks permission to share image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("No permission to share that image")
raise webob.exc.HTTPForbidden(msg)
# Determine the applicable can_share value
can_share = None
if body:
try:
can_share = bool(body['member']['can_share'])
except Exception, e:
# Malformed entity...
msg = _("Invalid membership association specified for "
"image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("Invalid membership association: %s") % e
raise webob.exc.HTTPBadRequest(explanation=msg)
# Look up an existing membership...
session = self.db_api.get_session()
members = self.db_api.image_member_find(req.context,
image_id=image_id,
member=id,
session=session)
if members:
if can_share is not None:
values = dict(can_share=can_share)
self.db_api.image_member_update(req.context, members[0],
values, session=session)
else:
values = dict(image_id=image['id'], member=id,
can_share=bool(can_share))
self.db_api.image_member_create(req.context, values,
session=session)
msg = _("Successfully updated a membership for image %(id)s")
LOG.info(msg % {'id': image_id})
return webob.exc.HTTPNoContent()
@utils.mutating
def delete(self, req, image_id, id):
"""
Removes a membership from the image.
"""
self._check_can_access_image_members(req.context)
# Make sure the image exists
try:
image = self.db_api.image_get(req.context, image_id)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
# Can they manipulate the membership?
if not self.db_api.is_image_sharable(req.context, image):
msg = _("User lacks permission to share image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("No permission to share that image")
raise webob.exc.HTTPForbidden(msg)
# Look up an existing membership
try:
session = self.db_api.get_session()
members = self.db_api.image_member_find(req.context,
image_id=image_id,
member=id,
session=session)
self.db_api.image_member_delete(req.context,
members[0],
session=session)
except exception.NotFound:
pass
# Make an appropriate result
msg = _("Successfully deleted a membership from image %(id)s")
LOG.info(msg % {'id': image_id})
return webob.exc.HTTPNoContent()
def index_shared_images(self, req, id):
"""
Retrieves images shared with the given member.
"""
try:
members = self.db_api.image_member_find(req.context, member=id)
except exception.NotFound, e:
msg = _("Member %(id)s not found")
LOG.info(msg % {'id': id})
msg = _("Membership could not be found.")
raise webob.exc.HTTPBadRequest(explanation=msg)
msg = _("Returning list of images shared with member %(id)s")
LOG.info(msg % {'id': id})
return dict(shared_images=make_member_list(members,
image_id='image_id',
can_share='can_share'))
def make_member_list(members, **attr_map):
"""
Create a dict representation of a list of members which we can use
to serialize the members list. Keyword arguments map the names of
optional attributes to include to the database attribute.
"""
def _fetch_memb(memb, attr_map):
return dict([(k, memb[v]) for k, v in attr_map.items()
if v in memb.keys()])
# Return the list of members with the given attribute mapping
return [_fetch_memb(memb, attr_map) for memb in members
if not memb.deleted]
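# Illustrative example (not part of the API): given member records that expose
# 'member' and 'can_share' keys, make_member_list(members, member_id='member',
# can_share='can_share') returns something like
# [{'member_id': 'tenant-a', 'can_share': False}, ...], skipping deleted rows.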
def create_resource():
"""Image members resource factory method."""
deserializer = wsgi.JSONRequestDeserializer()
serializer = wsgi.JSONResponseSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
| apache-2.0 | -568,843,902,991,611,650 | 39.444444 | 79 | 0.529032 | false |
jsenko/repour | repour/server/server.py | 1 | 3266 | import asyncio
import logging
from aiohttp import web
from .endpoint import cancel
from .endpoint import endpoint
from ..adjust import adjust
from .. import clone
from .. import pull
from .. import repo
from .endpoint import validation
from ..auth import auth
from ..config import config
logger = logging.getLogger(__name__)
#
# Setup
#
shutdown_callbacks = []
@asyncio.coroutine
def init(loop, bind, repo_provider, adjust_provider):
logger.debug("Running init")
c = yield from config.get_configuration()
auth_provider = c.get('auth', {}).get('provider', None)
logger.info("Using auth provider '" + str(auth_provider) + "'.")
app = web.Application(loop=loop, middlewares=[auth.providers[auth_provider]] if auth_provider else {})
logger.debug("Adding application resources")
app["repo_provider"] = repo.provider_types[repo_provider["type"]](**repo_provider["params"])
if repo_provider["type"] == "modeb":
logger.warn("Mode B selected, guarantees rescinded")
pull_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.pull_modeb, pull.pull)
adjust_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.adjust_modeb, adjust.adjust)
else:
pull_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.pull, pull.pull)
adjust_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.adjust, adjust.adjust)
logger.debug("Setting up handlers")
app.router.add_route("POST", "/pull", pull_source)
app.router.add_route("POST", "/adjust", adjust_source)
app.router.add_route("POST", "/clone", endpoint.validated_json_endpoint(shutdown_callbacks, validation.clone, clone.clone))
app.router.add_route("POST", "/cancel", cancel.handle_cancel)
logger.debug("Creating asyncio server")
srv = yield from loop.create_server(app.make_handler(), bind["address"], bind["port"])
for socket in srv.sockets:
logger.info("Server started on socket: {}".format(socket.getsockname()))
def start_server(bind, repo_provider, adjust_provider):
logger.debug("Starting server")
loop = asyncio.get_event_loop()
# Monkey patch for Python 3.4.1
if not hasattr(loop, "create_task"):
loop.create_task = lambda c: asyncio.async(c, loop=loop)
loop.run_until_complete(init(
loop=loop,
bind=bind,
repo_provider=repo_provider,
adjust_provider=adjust_provider,
))
try:
loop.run_forever()
except KeyboardInterrupt:
logger.debug("KeyboardInterrupt")
finally:
logger.info("Stopping tasks")
tasks = asyncio.Task.all_tasks()
for task in tasks:
task.cancel()
results = loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))
for shutdown_callback in shutdown_callbacks:
shutdown_callback()
exception_results = [r for r in results if
isinstance(r, Exception) and not isinstance(r, asyncio.CancelledError)]
if len(exception_results) > 1:
raise Exception(exception_results)
elif len(exception_results) == 1:
raise exception_results[0]
loop.close()
| apache-2.0 | 3,060,298,820,635,262,000 | 34.89011 | 127 | 0.676975 | false |
johncfaver/iddqd | cgi-bin/createadmin.py | 1 | 1279 | #!/usr/bin/env python
##
# createadmin.py
# Runs when an admin promotes a user to admin.
# Must verify that request was from admin with correct token.
#
import psycopg2, cgi,cgitb,string,random
from sys import exit
cgitb.enable(display=0,logdir="../log/",format="text")
import config
form=cgi.FieldStorage()
keys=form.keys()
if 'userid' in keys:
userid = int(form['userid'].value)
else:
userid = 0
if 'token' in keys:
token = form['token'].value
else:
token=''
if 'upgradeuserid' in keys:
upgradeuserid = form['upgradeuserid'].value
else:
upgradeuserid=''
if (not userid or not token or not upgradeuserid):
config.returnhome(52)
exit()
try:
dbconn=psycopg2.connect(config.dsn)
q=dbconn.cursor()
#Check that request is from admin user with valid token.
q.execute('SELECT count(*) FROM tokens t LEFT JOIN users u ON t.userid=u.userid WHERE u.userid=%s AND u.isadmin=true AND t.token=%s',[userid,token])
r = q.fetchone()
assert(r[0]==1)
#Request seems valid, promote the upgradeuserid
q.execute('UPDATE users SET isadmin=true WHERE userid=%s',[upgradeuserid])
dbconn.commit()
q.close()
dbconn.close()
print 'Location: ../admin.php \n\n'
except Exception:
config.returnhome(53)
| gpl-3.0 | -6,717,818,911,261,644,000 | 22.685185 | 152 | 0.680219 | false |
cculianu/bitcoin-abc | test/functional/wallet_labels.py | 1 | 7161 | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test label RPCs.
RPCs tested are:
- getaddressesbylabel
- listaddressgroupings
- setlabel
"""
from collections import defaultdict
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class WalletLabelsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Check that there's no UTXO on the node
node = self.nodes[0]
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generatetoaddress(
nblocks=1, address=node.getnewaddress(
label='coinbase'))
node.generatetoaddress(
nblocks=101,
address=node.getnewaddress(
label='coinbase'))
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Bitcoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 3)
assert_equal(address_group[0][1], 50)
assert_equal(address_group[0][2], 'coinbase')
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
node.sendmany(
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" label has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
amount_to_send = 1.0
# Create labels and make sure subsequent label API calls
# recognize the label/address associations.
labels = [Label(name)
for name in ("a", "b", "c", "d", "e")]
for label in labels:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), sorted(
['coinbase'] + [label.name for label in labels]))
# Send a transaction to each label.
for label in labels:
node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
node.generate(1)
for label in labels:
assert_equal(
node.getreceivedbyaddress(label.addresses[0]), amount_to_send)
assert_equal(node.getreceivedbylabel(label.name), amount_to_send)
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
node.sendtoaddress(to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
label.verify(node)
node.generate(101)
# Check that setlabel can assign a label to a new unused address.
for label in labels:
address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
assert_raises_rpc_error(-11,
"No addresses with label",
node.getaddressesbylabel,
"")
# Check that addmultisigaddress can assign labels.
for label in labels:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(
5, addresses, label.name)['address']
label.add_address(multisig_address)
label.purpose[multisig_address] = "send"
label.verify(node)
node.generate(101)
# Check that setlabel can change the label of an address from a
# different label.
change_label(node, labels[0].addresses[0], labels[0], labels[1])
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
change_label(node, labels[2].addresses[0], labels[2], labels[2])
class Label:
def __init__(self, name):
# Label name
self.name = name
# Current receiving address associated with this label.
self.receive_address = None
# List of all addresses assigned with this label
self.addresses = []
# Map of address to address purpose
self.purpose = defaultdict(lambda: "receive")
def add_address(self, address):
assert_equal(address not in self.addresses, True)
self.addresses.append(address)
def add_receive_address(self, address):
self.add_address(address)
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
for address in self.addresses:
assert_equal(
node.getaddressinfo(address)['labels'][0],
{"name": self.name,
"purpose": self.purpose[address]})
assert_equal(node.getaddressinfo(address)['label'], self.name)
assert_equal(
node.getaddressesbylabel(self.name),
{address: {"purpose": self.purpose[address]} for address in self.addresses})
def change_label(node, address, old_label, new_label):
assert_equal(address in old_label.addresses, True)
node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
old_label.verify(node)
new_label.verify(node)
if __name__ == '__main__':
WalletLabelsTest().main()
| mit | 1,248,417,646,825,572,000 | 36.103627 | 88 | 0.609272 | false |
fracpete/python-weka-wrapper | python/weka/flow/container.py | 1 | 7925 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# container.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import re
from weka.core.dataset import Instances
class Container(object):
"""
Container for storing multiple objects and passing them around together in the flow.
"""
def __init__(self):
"""
Initializes the container.
"""
self._data = {}
self._allowed = []
def get(self, name):
"""
Returns the stored data.
:param name: the name of the item to return
:type name: str
:return: the data
:rtype: object
"""
return self._data[name]
def set(self, name, value):
"""
Stores the given data (if not None).
:param name: the name of the item to store
:type name: str
:param value: the value to store
:type value: object
"""
if value is not None:
self._data[name] = value
@property
def allowed(self):
"""
        Returns all the allowed keys.
:return: the list of allowed keys.
:rtype: list
"""
return self._allowed
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return True
def __str__(self):
"""
Returns the content of the container as string.
:return: the content
:rtype: str
"""
return str(self._data)
def generate_help(self):
"""
Generates a help string for this container.
:return: the help string
:rtype: str
"""
result = []
result.append(self.__class__.__name__)
result.append(re.sub(r'.', '=', self.__class__.__name__))
result.append("")
result.append("Supported value names:")
for a in self.allowed:
result.append(a)
return '\n'.join(result)
def print_help(self):
"""
Prints a help string for this actor to stdout.
"""
print(self.generate_help())
class ModelContainer(Container):
"""
Container for models.
"""
def __init__(self, model=None, header=None):
"""
Initializes the container.
:param model: the model to store (eg Classifier or Clusterer)
:type model: object
:param header: the header instances
:type header: Instances
"""
super(ModelContainer, self).__init__()
self.set("Model", model)
if header is not None:
header = Instances.template_instances(header)
self.set("Header", header)
self._allowed = ["Model", "Header"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Model" in self._data) or ("Model" in self._data and "Header" in self._data)
class AttributeSelectionContainer(Container):
"""
Container for models.
"""
def __init__(self, original=None, reduced=None, num_atts=None, selected=None, results=None):
"""
Initializes the container.
:param original: the original dataset
:type original: Instances
:param reduced: the reduced dataset
:type reduced: Instances
:param num_atts: the number of attributes
:type num_atts: int
:param selected: the list of selected attribute indices (0-based)
:type selected: list
:param results: the generated results string
:type results: str
"""
super(AttributeSelectionContainer, self).__init__()
self.set("Original", original)
self.set("Reduced", reduced)
self.set("NumAttributes", num_atts)
self.set("Selected", selected)
self.set("Results", results)
self._allowed = ["Original", "Reduced", "NumAttributes", "Selected", "Results"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Reduced" in self._data) and ("NumAttributes" in self._data) and ("Selected" in self._data)
class ClassificationContainer(Container):
"""
Container for predictions (classifiers).
"""
def __init__(self, inst=None, classification=None, label=None, distribution=None):
"""
Initializes the container.
:param inst: the instance used for making the prediction
:type inst: Instance
:param classification: the classification (numeric value or 0-based label index)
:type classification: float
:param label: classification label (for nominal classes)
:type label: str
:param distribution: the class distribution
:type distribution: ndarray
"""
super(ClassificationContainer, self).__init__()
self.set("Instance", inst)
self.set("Classification", classification)
self.set("Label", label)
self.set("Distribution", distribution)
self._allowed = ["Instance", "Classification", "Label", "Distribution"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Instance" in self._data) and ("Classification" in self._data)
class ClusteringContainer(Container):
"""
Container for predictions (clusterers).
"""
def __init__(self, inst=None, cluster=None, distribution=None):
"""
Initializes the container.
:param inst: the instance used for making the prediction
:type inst: Instance
:param cluster: the cluster
:type cluster: int
:param distribution: the class distribution
:type distribution: ndarray
"""
super(ClusteringContainer, self).__init__()
self.set("Instance", inst)
self.set("Cluster", cluster)
self.set("Distribution", distribution)
self._allowed = ["Instance", "Cluster", "Distribution"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Instance" in self._data) and ("Cluster" in self._data)
| gpl-3.0 | -7,964,519,696,073,500,000 | 28.243542 | 107 | 0.592303 | false |
clwainwright/CosmoTransitions | cosmoTransitions/finiteT.py | 1 | 10960 | """
This module provides the functions for the one-loop finite
temperature corrections to a potential in QFT. The two basic
functions are:
Jb(x) = int[0->inf] dy +y^2 log( 1 - exp(-sqrt(x^2 + y^2)) )
Jf(x) = int[0->inf] dy -y^2 log( 1 + exp(-sqrt(x^2 + y^2)) )
Call them by:
Jb(x, approx='high', deriv=0, n = 8)
Here, approx can either be 'exact', 'spline', 'high', or 'low'.
Exact calculates the integral numerically, while high and low
calculate the high and low x expansions of J to order n.
Specify the derivative with the 'deriv' parameter.
"""
import os
import numpy
from scipy import integrate, interpolate
from scipy import special
try:
from scipy.misc import factorial as fac
except ImportError:
from scipy.special import factorial as fac
pi = numpy.pi
euler_gamma = 0.577215661901532
log, exp, sqrt = numpy.log, numpy.exp, numpy.sqrt
array = numpy.array
spline_data_path = os.path.dirname(__file__)
# The following are the exact integrals:
def _Jf_exact(x):
f = lambda y: -y*y*log(1+exp(-sqrt(y*y+x*x)))
if(x.imag == 0):
x = abs(x)
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: -y*y*log(2*abs(numpy.cos(sqrt(abs(x*x)-y*y)/2)))
return (
integrate.quad(f1,0,abs(x))[0] +
integrate.quad(f,abs(x),numpy.inf)[0]
)
def _Jf_exact2(theta):
# Note that this is a function of theta so that you can get negative values
f = lambda y: -y*y*log(1+exp(-sqrt(y*y+theta))).real
if theta >= 0:
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: -y*y*log(2*abs(numpy.cos(sqrt(-theta-y*y)/2)))
return (
integrate.quad(f, abs(theta)**.5, numpy.inf)[0] +
integrate.quad(f1, 0, abs(theta)**.5)[0]
)
def _Jb_exact(x):
f = lambda y: y*y*log(1-exp(-sqrt(y*y+x*x)))
if(x.imag == 0):
x = abs(x)
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: y*y*log(2*abs(numpy.sin(sqrt(abs(x*x)-y*y)/2)))
return (
integrate.quad(f1,0,abs(x))[0] +
integrate.quad(f,abs(x),numpy.inf)[0]
)
def _Jb_exact2(theta):
# Note that this is a function of theta so that you can get negative values
f = lambda y: y*y*log(1-exp(-sqrt(y*y+theta))).real
if theta >= 0:
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: y*y*log(2*abs(numpy.sin(sqrt(-theta-y*y)/2)))
return (
integrate.quad(f, abs(theta)**.5, numpy.inf)[0] +
integrate.quad(f1, 0, abs(theta)**.5)[0]
)
def _dJf_exact(x):
f = lambda y: y*y*(exp(sqrt(y*y+x*x))+1)**-1*x/sqrt(y*y+x*x)
return integrate.quad(f, 0, numpy.inf)[0]
def _dJb_exact(x):
f = lambda y: y*y*(exp(sqrt(y*y+x*x))-1)**-1*x/sqrt(y*y+x*x)
return integrate.quad(f, 0, numpy.inf)[0]
def arrayFunc(f, x, typ=float):
# This function allows a 1D array to be passed to something that
# normally can't handle it
i = 0
try:
n = len(x)
except:
return f(x) # x isn't an array
s = numpy.empty(n, typ)
while(i < n):
try:
s[i] = f(x[i])
except:
s[i] = numpy.NaN
i += 1
return s
def Jf_exact(x):
"""Jf calculated directly from the integral."""
return arrayFunc(_Jf_exact, x, complex)
def Jf_exact2(theta):
"""Jf calculated directly form the integral; input is theta = x^2."""
return arrayFunc(_Jf_exact2, theta)
def Jb_exact(x):
"""Jb calculated directly from the integral."""
return arrayFunc(_Jb_exact, x)
def Jb_exact2(theta):
"""Jb calculated directly form the integral; input is theta = x^2."""
return arrayFunc(_Jb_exact2, theta)
def dJf_exact(x):
"""dJf/dx calculated directly from the integral."""
return arrayFunc(_dJf_exact, x)
def dJb_exact(x):
"""dJb/dx calculated directly from the integral."""
return arrayFunc(_dJb_exact, x)
# Spline fitting, Jf
_xfmin = -6.82200203 # -11.2403168
_xfmax = 1.35e3
_Jf_dat_path = spline_data_path+"/finiteT_f.dat.txt"
if os.path.exists(_Jf_dat_path):
_xf, _yf = numpy.loadtxt(_Jf_dat_path).T
else:
    # x = |xmin|*sinh(y), where y is linear
    # (so that we're not overpopulating the uninteresting region)
_xf = numpy.linspace(numpy.arcsinh(-1.3*20),
numpy.arcsinh(-20*_xfmax/_xfmin), 1000)
_xf = abs(_xfmin)*numpy.sinh(_xf)/20
_yf = Jf_exact2(_xf)
numpy.savetxt(_Jf_dat_path, numpy.array([_xf, _yf]).T)
_tckf = interpolate.splrep(_xf, _yf)
def Jf_spline(X,n=0):
"""Jf interpolated from a saved spline. Input is (m/T)^2."""
X = numpy.array(X, copy=False)
x = X.ravel()
y = interpolate.splev(x,_tckf, der=n).ravel()
y[x < _xfmin] = interpolate.splev(_xfmin,_tckf, der=n)
y[x > _xfmax] = 0
return y.reshape(X.shape)
# Spline fitting, Jb
_xbmin = -3.72402637
# We're setting the lower acceptable bound as the point where it's a minimum
# This guarantees that it's a monotonically increasing function, and the first
# deriv is continuous.
_xbmax = 1.41e3
_Jb_dat_path = spline_data_path+"/finiteT_b.dat.txt"
if os.path.exists(_Jb_dat_path):
_xb, _yb = numpy.loadtxt(_Jb_dat_path).T
else:
    # x = |xmin|*sinh(y), where y is linear
    # (so that we're not overpopulating the uninteresting region)
_xb = numpy.linspace(numpy.arcsinh(-1.3*20),
numpy.arcsinh(-20*_xbmax/_xbmin), 1000)
_xb = abs(_xbmin)*numpy.sinh(_xb)/20
_yb = Jb_exact2(_xb)
numpy.savetxt(_Jb_dat_path, numpy.array([_xb, _yb]).T)
_tckb = interpolate.splrep(_xb, _yb)
def Jb_spline(X,n=0):
"""Jb interpolated from a saved spline. Input is (m/T)^2."""
X = numpy.array(X, copy=False)
x = X.ravel()
y = interpolate.splev(x,_tckb, der=n).ravel()
y[x < _xbmin] = interpolate.splev(_xbmin,_tckb, der=n)
y[x > _xbmax] = 0
return y.reshape(X.shape)
# Now for the low x expansion (require that n <= 50)
a,b,c,d = -pi**4/45, pi*pi/12, -pi/6, -1/32.
logab = 1.5 - 2*euler_gamma + 2*log(4*pi)
l = numpy.arange(50)+1
g = (-2*pi**3.5 * (-1)**l*(1+special.zetac(2*l+1)) *
special.gamma(l+.5)/(fac(l+2)*(2*pi)**(2*l+4)))
lowCoef_b = (a,b,c,d,logab,l,g)
del (a,b,c,d,logab,l,g) # clean up name space
a,b,d = -7*pi**4/360, pi*pi/24, 1/32.
logaf = 1.5 - 2*euler_gamma + 2*log(pi)
l = numpy.arange(50)+1
g = (.25*pi**3.5 * (-1)**l*(1+special.zetac(2*l+1)) *
special.gamma(l+.5)*(1-.5**(2*l+1))/(fac(l+2)*pi**(2*l+4)))
lowCoef_f = (a,b,d,logaf,l,g)
del (a,b,d,logaf,l,g) # clean up name space
def Jb_low(x,n=20):
"""Jb calculated using the low-x (high-T) expansion."""
(a,b,c,d,logab,l,g) = lowCoef_b
y = a + x*x*(b + x*(c + d*x*(numpy.nan_to_num(log(x*x)) - logab)))
i = 1
while i <= n:
y += g[i-1]*x**(2*i+4)
i += 1
return y
def Jf_low(x,n=20):
"""Jf calculated using the low-x (high-T) expansion."""
(a,b,d,logaf,l,g) = lowCoef_f
y = a + x*x*(b + d*x*x*(numpy.nan_to_num(log(x*x)) - logaf))
i = 1
while i <= n:
y += g[i-1]*x**(2*i+4)
i += 1
return y
# The next few functions are all for the high approximation
def x2K2(k,x):
y = -x*x*special.kn(2, k*x)/(k*k)
if(isinstance(x, numpy.ndarray)):
y[x == 0] = numpy.ones(len(y[x == 0]))*-2.0/k**4
elif(x == 0):
return -2.0/k**4
return y
def dx2K2(k,x):
y = abs(x)
return numpy.nan_to_num(x*y*special.kn(1,k*y)/k)
def d2x2K2(k,x):
x = abs(x)
y = numpy.nan_to_num(x*(special.kn(1,k*x)/k - x*special.kn(0,k*x)))
if(isinstance(x, numpy.ndarray)):
y[x == 0] = numpy.ones(len(y[x == 0]))*1.0/k**2
elif(x == 0):
return 1.0/k**2
return y
def d3x2K2(k,x):
y = abs(x)
return numpy.nan_to_num(x*(y*k*special.kn(1,k*y) - 3*special.kn(0,k*y)))
def Jb_high(x, deriv=0, n=8):
"""Jb calculated using the high-x (low-T) expansion."""
K = (x2K2, dx2K2, d2x2K2, d3x2K2)[deriv]
y, k = 0.0, 1
while k <= n:
y += K(k,x)
k += 1
return y
def Jf_high(x, deriv=0, n=8):
"""Jf calculated using the high-x (low-T) expansion."""
K = (x2K2, dx2K2, d2x2K2, d3x2K2)[deriv]
y, k, i = 0.0, 1, 1
while k <= n:
y += i*K(k,x)
i *= -1
k += 1
return y
# And here are the final functions:
# Note that if approx = 'spline', the function called is
# J(theta) (x^2 -> theta so you can get negative mass squared)
def Jb(x, approx='high', deriv=0, n=8):
"""
A shorthand for calling one of the Jb functions above.
Parameters
----------
approx : str, optional
One of 'exact', 'high', 'low', or 'spline'.
deriv : int, optional
The order of the derivative (0 for no derivative).
Must be <= (1, 3, 0, 3) for approx = (exact, high, low, spline).
n : int, optional
Number of terms to use in the low and high-T approximations.
"""
if(approx == 'exact'):
if(deriv == 0):
return Jb_exact(x)
elif(deriv == 1):
return dJb_exact(x)
else:
raise ValueError("For approx=='exact', deriv must be 0 or 1.")
elif(approx == 'spline'):
return Jb_spline(x, deriv)
elif(approx == 'low'):
if(n > 100):
raise ValueError("Must have n <= 100")
if(deriv == 0):
return Jb_low(x,n)
else:
raise ValueError("For approx=='low', deriv must be 0.")
elif(approx == 'high'):
if(deriv > 3):
raise ValueError("For approx=='high', deriv must be 3 or less.")
else:
return Jb_high(x, deriv, n)
raise ValueError("Unexpected value for 'approx'.")
def Jf(x, approx='high', deriv=0, n=8):
"""
A shorthand for calling one of the Jf functions above.
Parameters
----------
approx : str, optional
One of 'exact', 'high', 'low', or 'spline'.
deriv : int, optional
The order of the derivative (0 for no derivative).
Must be <= (1, 3, 0, 3) for approx = (exact, high, low, spline).
n : int, optional
Number of terms to use in the low and high-T approximations.
"""
if(approx == 'exact'):
if(deriv == 0):
return Jf_exact(x)
elif(deriv == 1):
return dJf_exact(x)
else:
raise ValueError("For approx=='exact', deriv must be 0 or 1.")
elif(approx == 'spline'):
return Jf_spline(x, deriv)
elif(approx == 'low'):
if(n > 100):
raise ValueError("Must have n <= 100")
if(deriv == 0):
return Jf_low(x,n)
else:
raise ValueError("For approx=='low', deriv must be 0.")
elif(approx == 'high'):
if(deriv > 3):
raise ValueError("For approx=='high', deriv must be 3 or less.")
else:
return Jf_high(x, deriv, n)
raise ValueError("Unexpected value for 'approx'.")
| mit | 2,406,444,055,076,283,400 | 28.226667 | 79 | 0.565967 | false |
rcfox/CGen | test/test_functions.py | 1 | 2882 | from cgen.blocks import *
def test_basic_function():
src = SourceFile()
with src.function('main', Type('int'), [Variable('argc', Type('int')), Variable('argv', Type('char').pointer().pointer())]) as f:
f.variable('foo', Type('int'), value=42)
f.return_statement(0)
assert f.output() == \
'''
int main(int argc, char** argv) {
int foo = 42;
return 0;
}
'''.strip()
def test_function_if():
src = SourceFile()
with src.function('main', Type('int'), [Variable('argc', Type('int')), Variable('argv', Type('char').pointer().pointer())]) as f:
f.variable('foo', Type('int'), value=42)
with f.if_statement('foo == 42') as ifs:
ifs.return_statement(0)
f.return_statement(1)
assert src.output() == \
'''
int main(int argc, char** argv) {
int foo = 42;
if (foo == 42) {
return 0;
}
return 1;
}'''
def test_function_while():
src = SourceFile()
with src.function('main', Type('int'), [Variable('argc', Type('int')), Variable('argv', Type('char').pointer().pointer())]) as f:
f.variable('foo', Type('int'), value=42)
with f.while_statement('foo == 42') as ifs:
ifs.set('foo', 43)
f.return_statement(1)
assert src.output() == \
'''
int main(int argc, char** argv) {
int foo = 42;
while (foo == 42) {
foo = 43;
}
return 1;
}'''
def test_function_for():
src = SourceFile()
with src.function('main', Type('int'), [Variable('argc', Type('int')), Variable('argv', Type('char').pointer().pointer())]) as f:
f.variable('foo', Type('int'), value=42)
with f.for_statement('foo = 0', 'foo < 10', '++foo') as ifs:
pass
f.return_statement(0)
assert src.output() == \
'''
int main(int argc, char** argv) {
int foo = 42;
for (foo = 0; foo < 10; ++foo) {
}
return 0;
}'''
def test_function_def_var():
src = SourceFile()
with src.function('main', Type('int'), [Variable('argc', Type('int')), Variable('argv', Type('char').pointer().pointer())]) as f:
f.variable('foo', Type('int'), value=42)
with f.for_statement(Variable('bar', Type('int'), value=0), 'bar < foo', '++bar') as ifs:
pass
f.return_statement(0)
assert src.output() == \
'''
int main(int argc, char** argv) {
int foo = 42;
for (int bar = 0; bar < foo; ++bar) {
}
return 0;
}'''
def test_function_call_function():
src = SourceFile()
with src.function('main', Type('int'), [Variable('argc', Type('int')), Variable('argv', Type('char').pointer().pointer())]) as f:
f.variable('foo', Type('int'), value=42)
f.call('printf', [r'"foo %d\n"', 'foo'])
f.return_statement(0)
assert src.output() == \
r'''
int main(int argc, char** argv) {
int foo = 42;
printf("foo %d\n", foo);
return 0;
}'''
| mit | -2,230,830,704,056,779,800 | 29.659574 | 133 | 0.543373 | false |
isthatme/openHomeControl | src/A10-LIME/mqtt_PCA9685_first_copy.py | 1 | 2431 | #!/usr/bin/env python
import paho.mqtt.client as mqtt
from pyA10Lime import i2c
import time
'''
Overall design of the program:
Set up mqtt
set up the PCA9685
sub to relevent channels
how do channels work?
devID/controllerID/pinNum maybe?
so A10-1/0x42/4 would reference pyA10 number 1, with a controller addressed at 0x42, pin 4 (5th pin)
example message contents:
subject: A10-1/0x42/4
message: 75
this would set the above pin to a value of 75
don't bother with the LED_ALL stuff, it is kinda useless
'''
'''
TODO:
maybe get a config file set up
ERROR HANDLING
fade-in and fade-out
maybe randomising start times to minimize peak current draw?
'''
clientID = "A10-1" #maybe change this to the hostmname?
mqttBroker = "something" #the URL/IP of the mqtt broker
driverNum = 1 #number of LED drivers needed to initialize
def on_connect(client, userdata, flags, rc):
print("Connected")
client.subscribe(clientID + "/#") #subscribe for things
def on_message(client, userdata, msg):
print("Topic is: " + msg.topic)
print("Message is: " + msg.payload)
	topic = msg.topic.split("/")
	if topic[0] == clientID:
		devAddr = int(topic[1], 16)
		pin = int(topic[2])
		value = int(msg.payload)
		endTime = (4096 * value) / 100 #value is percent and you set the end time based on % of 4096
		register = 6 + (pin * 4) #used to find the register number
		i2c.open(devAddr)
		i2c.write([register, 0x00]) #LEDn_ON_X starts at 0
		register += 1
		i2c.write([register, 0x00])
		register += 1
		i2c.write([register, endTime]) #LEDn_OFF_X is defined by endTime
		register += 1
		i2c.write([register, endTime >> 8])
i2c.close() #a more efficient way would be to auto-increment
#might have to auto increment when I implement fading
else:
print("Wrong topic") #later I should add diagnostic topics and the such
def init():
i2c.init("/dev/i2c-0"); #we'll be using i2c-0, I think
client = mqtt.Client(clientID) #create a client with an ID
client.on_connect = on_connect
client.on_message = on_message
client.connect(mqttBroker, 1883, 60)
client.loop_start() #when do we stop this loop?
def PCA9685_init():
addr = 0x40
j = driverNum
while j > 0:
i2c.open(addr)
i2c.write([0x00, 0x80]) #reset
i2c.write([0x00, 0x10]) #sleep
i2c.write([0xFE, 0x1E]) #PRE_SCALE to 200Hz
i2c.write([0x00, 0x00]) #wake up
i2c.close()
addr += 1
j -= 1
| bsd-2-clause | 8,889,925,543,877,168,000 | 20.324561 | 100 | 0.679144 | false |
EvilDako/PyTraining | test/test_edit_contact.py | 1 | 1268 | __author__ = 'dako'
# -*- coding: utf-8 -*-
from model.contact import Contact
import random
def test_edit_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="test"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
contact_new = Contact(firstname="Petr", middlename="Petrovich", lastname="Petrov", nickname="PETRO", title="Mr", company="SUPERCOMPANY_2", address="Moscow, Old Arbat, 10",
tel_home="595555555", tel_mobile="89009009091", tel_work="495123555", tel_fax="+799999999", email="[email protected]", email2="[email protected]", email3="[email protected]", homepage="www.petrusha.com",
address2="none_2", phone2="none_2", notes="too many funny comments")
contact_new.id = contact.id
app.contact.edit_contact_by_id(contact.id, contact_new)
new_contacts = db.get_contact_list()
assert len(old_contacts) == app.contact.count()
#old_contacts[index] = contact
#assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max) | gpl-2.0 | 2,127,943,103,430,762,500 | 59.428571 | 221 | 0.665615 | false |
AdrianGaudebert/socorro-crashstats | crashstats/api/views.py | 1 | 6278 | import re
import datetime
from django import http
from django.shortcuts import render
from django.contrib.sites.models import RequestSite
from django.core.urlresolvers import reverse
from django.conf import settings
from django import forms
from ratelimit.decorators import ratelimit
from waffle.decorators import waffle_switch
from crashstats.crashstats import models
from crashstats.crashstats import utils
from .cleaner import Cleaner
class APIWhitelistError(Exception):
pass
class MultipleStringField(forms.TypedMultipleChoiceField):
"""Field that do not validate if the field values are in self.choices"""
def to_python(self, value):
"""Override checking method"""
return map(self.coerce, value)
def validate(self, value):
"""Nothing to do here"""
if self.required and not value:
raise forms.ValidationError(self.error_messages['required'])
TYPE_MAP = {
basestring: forms.CharField,
list: MultipleStringField,
datetime.date: forms.DateField,
datetime.datetime: forms.DateTimeField,
int: forms.IntegerField,
}
def fancy_init(self, model, *args, **kwargs):
self.model = model
self.__old_init__(*args, **kwargs)
for parameter in model.get_annotated_params():
required = parameter['required']
name = parameter['name']
if parameter['type'] not in TYPE_MAP:
raise NotImplementedError(parameter['type'])
field_class = TYPE_MAP[parameter['type']]
self.fields[name] = field_class(required=required)
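# Note (added for clarity): the metaclass below swaps FormWrapper.__init__ for
# fancy_init above, so FormWrapper(model, request.REQUEST) grows one Django
# form field per annotated parameter that the middleware model exposes.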
class FormWrapperMeta(forms.Form.__metaclass__):
def __new__(cls, name, bases, attrs):
attrs['__old_init__'] = bases[0].__init__
attrs['__init__'] = fancy_init
return super(FormWrapperMeta, cls).__new__(cls, name, bases, attrs)
class FormWrapper(forms.Form):
__metaclass__ = FormWrapperMeta
# Names of models we don't want to serve at all
BLACKLIST = (
# not because it's sensitive but because it's only used for writes
'ReleasesFeatured',
)
@waffle_switch('app_api_all')
@ratelimit(method=['GET', 'POST', 'PUT'], rate='10/m')
@utils.json_view
def model_wrapper(request, model_name):
if model_name in BLACKLIST:
raise http.Http404("Don't know what you're talking about!")
try:
model = getattr(models, model_name)
except AttributeError:
raise http.Http404('no model called `%s`' % model_name)
# XXX use RatelimitMiddleware instead of this in case
# we ratelimit multiple views
if getattr(request, 'limited', False):
# http://tools.ietf.org/html/rfc6585#page-3
return http.HttpResponse('Too Many Requests', status=429)
instance = model()
if request.method == 'POST':
function = instance.post
else:
function = instance.get
form = FormWrapper(model, request.REQUEST)
if form.is_valid():
try:
result = function(**form.cleaned_data)
except models.BadStatusCodeError as e:
try:
error_code = int(str(e).split(':')[0].strip())
if error_code >= 400 and error_code < 500:
return http.HttpResponse(e, status=error_code)
if error_code >= 500:
return http.HttpResponse(e, status=424)
except Exception:
# that means we can't assume that the BadStatusCodeError
# has a typically formatted error message
pass
raise
except ValueError as e:
if 'No JSON object could be decoded' in e:
return http.HttpResponse(
'Not a valid JSON response',
status=400
)
raise
# it being set to None means it's been deliberately disabled
if getattr(model, 'API_WHITELIST', -1) == -1:
raise APIWhitelistError('No API_WHITELIST defined for %r' % model)
clean_scrub = getattr(model, 'API_CLEAN_SCRUB', None)
if model.API_WHITELIST:
cleaner = Cleaner(
model.API_WHITELIST,
clean_scrub=clean_scrub,
# if True, uses warnings.warn() to show fields not whitelisted
debug=settings.DEBUG,
)
cleaner.start(result)
else:
result = {'errors': dict(form.errors)}
return result
@waffle_switch('app_api_all')
def documentation(request):
endpoints = [
]
for name in dir(models):
model = getattr(models, name)
try:
if not issubclass(model, models.SocorroMiddleware):
continue
if model is models.SocorroMiddleware:
continue
if model.__name__ in BLACKLIST:
continue
except TypeError:
# most likely a builtin class or something
continue
endpoints.append(_describe_model(model))
base_url = (
'%s://%s' % (request.is_secure() and 'https' or 'http',
RequestSite(request).domain)
)
data = {
'endpoints': endpoints,
'base_url': base_url,
}
return render(request, 'api/documentation.html', data)
def _describe_model(model):
params = list(model.get_annotated_params())
params.sort(key=lambda x: (not x['required'], x['name']))
methods = []
if model.get:
methods.append('GET')
    elif model.post:
methods.append('POST')
docstring = model.__doc__
if docstring:
docstring = dedent_left(docstring.rstrip(), 4)
data = {
'name': model.__name__,
'url': reverse('api:model_wrapper', args=(model.__name__,)),
'parameters': params,
'defaults': getattr(model, 'defaults', {}),
'methods': methods,
'docstring': docstring,
}
return data
def dedent_left(text, spaces):
"""
If the string is:
' One\n'
' Two\n'
'Three\n'
And you set @spaces=2
Then return this:
' One\n'
' Two\n'
'Three\n'
"""
lines = []
regex = re.compile('^\s{%s}' % spaces)
for line in text.splitlines():
line = regex.sub('', line)
lines.append(line)
return '\n'.join(lines)
| mpl-2.0 | 5,001,584,025,281,107,000 | 28.336449 | 78 | 0.597005 | false |
timurbakibayev/trains | tutu/views.py | 1 | 9410 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from tutu.models import Track
from django.views.decorators.csrf import csrf_exempt
from tutu.models import Switch
from tutu import draw
def index(request):
tracks = Track.objects.all()
# draw.something()
tracks_plus = []
for track in tracks:
t = {"id": track.id, "name": track.name, "start_name": track.start_name}
t["length"] = track.length()
tracks_plus.append(t)
context = {"tracks": tracks_plus}
return render(request, 'index.html', context)
def reset(request):
tracks = Track.objects.all()
for i in tracks:
i.simulation_in_progress = False
i.save()
# draw.something()
tracks_plus = []
for track in tracks:
t = {"id": track.id, "name": track.name, "start_name": track.start_name, "length": track.length()}
tracks_plus.append(t)
context = {"tracks": tracks_plus}
return render(request, 'index.html', context)
def new_track(request):
context = {}
if request.method == "POST":
track_name = request.POST["track_name"]
if (track_name is None) or (track_name == ""):
return render(request, "new_tarif.html", context)
t = Track()
t.name = track_name
t.start_name = request.POST["track_start_name"]
t.length = 0
try:
t.length = float(request.POST["track_length"])
except:
pass
t.save()
return HttpResponseRedirect("/")
return render(request, 'new_track.html', context={})
def new_switch(request, track_id):
try:
track = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
context = {"track": track}
if request.method == "POST":
switch_name = request.POST["switch_name"]
if (switch_name is None) or (switch_name == ""):
return render(request, "new_switch.html", context)
s = Switch()
s.track_id = track.id
s.name = switch_name
try:
s.position = float(request.POST["switch_position"])
except:
s.position = 0
try:
s.mins_acc = float(request.POST["switch_acc"])
except:
s.mins_acc = 0
try:
s.mins_main_fw = float(request.POST["switch_main_fw"])
except:
s.mins_main_fw = 0
try:
s.mins_main_bk = float(request.POST["switch_main_bk"])
except:
s.mins_main_bk = 0
try:
s.mins_station = float(request.POST["switch_station"])
except:
s.mins_station = 0
try:
s.mins_brk = float(request.POST["switch_brk"])
except:
s.mins_brk = 0
try:
s.number_of_tracks = float(request.POST["switch_number_of_tracks"])
except:
pass
try:
s.trains_fit = float(request.POST["switch_trains_fit"])
except:
pass
s.save()
return HttpResponseRedirect("/track/" + track_id)
return render(request, 'new_switch.html', context=context)
@csrf_exempt
def edit_track(request, track_id):
try:
t = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
context = {"track": t}
if request.method == "POST":
track_name = request.POST["track_name"]
if (track_name is None) or (track_name == ""):
pass
else:
t.name = track_name
t.start_name = request.POST["track_start_name"]
t.length = 0
try:
t.length = float(request.POST["track_length"])
except:
pass
try:
t.number_of_passenger_trains = float(request.POST["number_of_passenger_trains"])
except:
pass
try:
t.number_of_cargo_trains = float(request.POST["number_of_cargo_trains"])
except:
pass
try:
t.density_netto = float(request.POST["density_netto"])
except:
pass
t.save()
return HttpResponseRedirect("/")
return render(request, "edit_track.html", context)
@csrf_exempt
def delete_track(request, track_id):
try:
t = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
context = {"track": t}
if request.method == "POST":
t.delete()
return HttpResponseRedirect("/")
return render(request, "delete_track.html", context)
@csrf_exempt
def delete_switch(request, track_id, switch_id):
try:
t = Track.objects.get(pk=int(track_id))
s = Switch.objects.get(pk=int(switch_id))
except:
return render(request, "error.html")
context = {"switch": s, "track": t}
if request.method == "POST":
s.delete()
return HttpResponseRedirect("/track/" + track_id)
return render(request, "delete_switch.html", context)
@csrf_exempt
def edit_switch(request, track_id, switch_id):
try:
t = Track.objects.get(pk=int(track_id))
s = Switch.objects.get(pk=int(switch_id))
except:
return render(request, "error.html")
context = {"switch": s, "track": t}
if request.method == "POST":
switch_name = request.POST["switch_name"]
if (switch_name is None) or (switch_name == ""):
pass
else:
s.name = switch_name
try:
s.position = float(request.POST["switch_position"])
except:
pass
try:
s.mins_acc = float(request.POST["switch_acc"])
except:
s.mins_acc = 0
try:
s.mins_main_fw = float(request.POST["switch_main_fw"])
except:
s.mins_main_fw = 0
try:
s.mins_main_bk = float(request.POST["switch_main_bk"])
except:
s.mins_main_bk = 0
try:
s.mins_brk = float(request.POST["switch_brk"])
except:
s.mins_brk = 0
try:
s.mins_station = float(request.POST["switch_station"])
except:
s.mins_station = 0
try:
s.number_of_tracks = float(request.POST["switch_number_of_tracks"])
except:
pass
try:
s.trains_fit = float(request.POST["switch_trains_fit"])
except:
pass
s.save()
return HttpResponseRedirect("/track/" + track_id)
return render(request, "edit_switch.html", context)
def round(a):
return "%.3f" % a
@csrf_exempt
def show_track(request, track_id):
try:
track = Track.objects.get(pk=int(track_id))
switches_orig = Switch.objects.filter(track_id=track_id)
except:
return render(request, "error.html")
switches = []
prev_pos = 0
worst = 1000
for i, switch in enumerate(switches_orig):
new_sw = {"switch": switch, "sum":
switch.mins_acc + switch.mins_brk + switch.mins_main_fw +
switch.mins_main_bk + switch.mins_station}
single = int((60 * 23) * 0.96 / new_sw["sum"])
double = single * int(new_sw["sum"] / 8)
new_sw["capacity"] = (double, single)[switch.number_of_tracks < 2]
new_sw["number"] = i + 2
length = switch.position - prev_pos
new_sw["length"] = length
time = new_sw["sum"] / 60
new_sw["speed"] = int(float(length) / time * 10) / 10
new_sw["nalich"] = new_sw["capacity"] - \
(((track.number_of_cargo_trains + track.number_of_passenger_trains) / 0.85) -
(track.number_of_cargo_trains + track.number_of_passenger_trains)) - track.number_of_passenger_trains
new_sw["potreb"] = (track.number_of_cargo_trains + track.number_of_passenger_trains) / 0.85
new_sw["reserve_pairs"] = new_sw["nalich"] - new_sw["potreb"]
new_sw["train_weight"] = (track.density_netto * 1000000) / track.number_of_cargo_trains / 365
new_sw["reserve_cargo"] = new_sw["train_weight"] * new_sw["reserve_pairs"] * 365 / 1000000
new_sw["reserve_cargo_f"] = new_sw["reserve_cargo"]
if new_sw["reserve_cargo_f"] < worst:
worst = new_sw["reserve_cargo_f"]
new_sw["positive"] = new_sw["reserve_cargo"] > 0
new_sw["nalich"] = round(new_sw["nalich"])
new_sw["potreb"] = round(new_sw["potreb"])
new_sw["reserve_pairs"] = round(new_sw["reserve_pairs"])
new_sw["reserve_cargo"] = round(new_sw["reserve_cargo"])
new_sw["train_weight"] = round(new_sw["train_weight"])
switches.append(new_sw)
prev_pos = switch.position
switches_last_stage = []
for i in switches:
i["worst"] = (i["reserve_cargo_f"] == worst)
switches_last_stage.append(i)
context = {"track": track, "switches": switches_last_stage}
return render(request, "show_track.html", context)
def thumbnail_track(request, track_id):
try:
track = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
return draw.draw_track(track)
| gpl-3.0 | 1,335,998,830,436,119,000 | 32.133803 | 129 | 0.547928 | false |
cloudera/hue | desktop/core/src/desktop/lib/raz/raz_client.py | 1 | 7363 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import socket
import sys
import uuid
import requests
import requests_kerberos
from datetime import datetime, timedelta
from desktop.conf import AUTH_USERNAME
from desktop.lib.exceptions_renderable import PopupException
import desktop.lib.raz.signer_protos_pb2 as raz_signer
if sys.version_info[0] > 2:
from urllib.parse import urlparse as lib_urlparse
else:
from urlparse import urlparse as lib_urlparse
LOG = logging.getLogger(__name__)
class RazToken:
def __init__(self, raz_url, auth_handler):
self.raz_url = raz_url
self.auth_handler = auth_handler
self.init_time = datetime.now()
self.raz_token = None
o = lib_urlparse(self.raz_url)
if not o.netloc:
raise PopupException('Could not parse the host of the Raz server %s' % self.raz_url)
self.raz_hostname, self.raz_port = o.netloc.split(':')
self.scheme = o.scheme
def get_delegation_token(self, user):
ip_address = socket.gethostbyname(self.raz_hostname)
GET_PARAMS = {"op": "GETDELEGATIONTOKEN", "service": "%s:%s" % (ip_address, self.raz_port), "renewer": AUTH_USERNAME.get(), "doAs": user}
r = requests.get(self.raz_url, GET_PARAMS, auth=self.auth_handler, verify=False)
self.raz_token = json.loads(r.text)['Token']['urlString']
return self.raz_token
def renew_delegation_token(self, user):
if self.raz_token is None:
self.raz_token = self.get_delegation_token(user=user)
if (self.init_time - timedelta(hours=8)) > datetime.now():
r = requests.put("%s?op=RENEWDELEGATIONTOKEN&token=%s"%(self.raz_url, self.raz_token), auth=self.auth_handler, verify=False)
return self.raz_token
class RazClient(object):
def __init__(self, raz_url, raz_token, username, service='s3', service_name='cm_s3', cluster_name='myCluster'):
self.raz_url = raz_url.strip('/')
self.raz_token = raz_token
self.username = username
self.service = service
if self.service == 'adls':
self.service_params = {
'endpoint_prefix': 'adls',
'service_name': 'adls',
'serviceType': 'adls'
}
else:
self.service_params = {
'endpoint_prefix': 's3',
'service_name': 's3',
'serviceType': 's3'
}
self.service_name = service_name
self.cluster_name = cluster_name
self.requestid = str(uuid.uuid4())
def check_access(self, method, url, params=None, headers=None):
LOG.debug("Check access: method {%s}, url {%s}, params {%s}, headers {%s}" % (method, url, params, headers))
path = lib_urlparse(url)
url_params = dict([p.split('=') if '=' in p else (p, '') for p in path.query.split('&') if path.query]) # ?delete, ?prefix=/hue
params = params if params is not None else {}
headers = headers if headers is not None else {}
allparams = [raz_signer.StringListStringMapProto(key=key, value=[val]) for key, val in url_params.items()]
allparams.extend([raz_signer.StringListStringMapProto(key=key, value=[val]) for key, val in params.items()])
headers = [raz_signer.StringStringMapProto(key=key, value=val) for key, val in headers.items()]
endpoint = "%s://%s" % (path.scheme, path.netloc)
resource_path = path.path.lstrip("/")
LOG.debug(
"Preparing sign request with http_method: {%s}, headers: {%s}, parameters: {%s}, endpoint: {%s}, resource_path: {%s}" %
(method, headers, allparams, endpoint, resource_path)
)
raz_req = raz_signer.SignRequestProto(
endpoint_prefix=self.service_params['endpoint_prefix'],
service_name=self.service_params['service_name'],
endpoint=endpoint,
http_method=method,
headers=headers,
parameters=allparams,
resource_path=resource_path,
time_offset=0
)
raz_req_serialized = raz_req.SerializeToString()
signed_request = base64.b64encode(raz_req_serialized)
request_data = {
"requestId": self.requestid,
"serviceType": self.service_params['serviceType'],
"serviceName": self.service_name,
"user": self.username,
"userGroups": [],
"accessTime": "",
"clientIpAddress": "",
"clientType": "",
"clusterName": self.cluster_name,
"clusterType": "",
"sessionId": "",
"context": {
"S3_SIGN_REQUEST": signed_request
}
}
headers = {"Content-Type":"application/json", "Accept-Encoding":"gzip,deflate"}
raz_url = "%s/api/authz/s3/access?delegation=%s" % (self.raz_url, self.raz_token)
LOG.debug('Raz url: %s' % raz_url)
LOG.debug("Sending access check headers: {%s} request_data: {%s}" % (headers, request_data))
raz_req = requests.post(raz_url, headers=headers, json=request_data, verify=False)
signed_response_result = None
signed_response = None
if raz_req.ok:
result = raz_req.json().get("operResult", False) and raz_req.json()["operResult"]["result"]
if result == "NOT_DETERMINED":
msg = "Failure %s" % raz_req.json()
LOG.error(msg)
raise PopupException(msg)
if result != "ALLOWED":
msg = "Permission missing %s" % raz_req.json()
raise PopupException(msg, error_code=401)
if result == "ALLOWED":
LOG.debug('Received allowed response %s' % raz_req.json())
signed_response_data = raz_req.json()["operResult"]["additionalInfo"]
if self.service == 'adls':
LOG.debug("Received SAS %s" % signed_response_data["ADLS_DSAS"])
return {'token': signed_response_data["ADLS_DSAS"]}
else:
signed_response_result = signed_response_data["S3_SIGN_RESPONSE"]
if signed_response_result:
raz_response_proto = raz_signer.SignResponseProto()
signed_response = raz_response_proto.FromString(base64.b64decode(signed_response_result))
LOG.debug("Received signed Response %s" % signed_response)
# Signed headers "only"
if signed_response:
return dict([(i.key, i.value) for i in signed_response.signer_generated_headers])
def get_raz_client(raz_url, username, auth='kerberos', service='s3', service_name='cm_s3', cluster_name='myCluster'):
if auth == 'kerberos' or True: # True until JWT option
auth_handler = requests_kerberos.HTTPKerberosAuth(mutual_authentication=requests_kerberos.OPTIONAL)
raz = RazToken(raz_url, auth_handler)
raz_token = raz.get_delegation_token(user=username)
return RazClient(raz_url, raz_token, username, service=service, service_name=service_name, cluster_name=cluster_name)
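# Illustrative usage sketch (added for clarity, not part of the original
# module): obtain a RAZ client and ask it to authorize/sign an S3 request.
# The RAZ URL, user and bucket below are placeholders.
def _example_check_access():
  client = get_raz_client(
      'https://raz-host.example.com:6082/',
      username='hue',
      service='s3',
      service_name='cm_s3',
      cluster_name='myCluster')
  return client.check_access(
      method='GET',
      url='https://bucket.s3.amazonaws.com/data/?list-type=2',
      headers={'x-amz-content-sha256': 'UNSIGNED-PAYLOAD'})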
| apache-2.0 | -8,960,432,691,178,515,000 | 37.752632 | 141 | 0.665082 | false |
druce/safewithdrawal_tensorflow | run_safewithdrawal_linearalloc.py | 1 | 5015 | #!/home/ubuntu/anaconda2/bin/python
# MIT License
# Copyright (c) 2016 Druce Vertes [email protected]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import pickle
from time import strftime
import sys
import os
import numpy as np
import pandas as pd
fileprefix = "best08"
bestfile = "%s.pickle" % (fileprefix)
max_unimproved_steps = 200
gamma = 8.0
#Objective: 8315.064674
# const_spend = 2.321413
# var_spend_pcts = pd.Series([0.021015604501457775, 0.021761051829444631, 0.022312098346990435, 0.022785170076322969, 0.023285983064484993, 0.023897465220170052, 0.024584673876801872, 0.02556106756991109, 0.026657864441448173, 0.028031748201320435, 0.029551066581589736, 0.031201618742953394, 0.032978432086452118, 0.034516254916809298, 0.036027857701909138, 0.037763940480250287, 0.03992129323858909, 0.042635694985269881, 0.045638329119485004, 0.049069352739346678, 0.052383268763417638, 0.056951126091794861, 0.063470193195596478, 0.070974811737827201, 0.082180160879307573, 0.098169174319082841, 0.1205906552280696, 0.15769373320000857, 0.23376809386762137, 0.51005368542831198])
# stock_allocations = pd.Series([0.82085705309182722, 0.8208564375532369, 0.80809230790394848, 0.80474242187125467, 0.80321803760810162, 0.80214299804721623, 0.80178790048600157, 0.7839705620587375, 0.77739050153152156, 0.77699016168709201, 0.77517208520407443, 0.76706047015389667, 0.76676220145412832, 0.76576837231963391, 0.76098570290996814, 0.74113354059879621, 0.73793102049167558, 0.73650905089885166, 0.72707794679494286, 0.72393066589418387, 0.7210099158662584, 0.71370848573117784, 0.7038219623712294, 0.68848317679023907, 0.61956979054659567, 0.61331107236876559, 0.59738860596743892, 0.59391944015033249, 0.59164222259062249, 0.53441829378265526])
# startval = 100
# years_retired = 30
# const_spend_pct = .02
# const_spend = startval * const_spend_pct
# var_spend_pcts = pd.Series(np.ones(years_retired) * 0.02)
# var_spend_pcts[-1]=1.0
# stock_allocations = pd.Series(np.ones(years_retired) * 0.65)
startval = 100
years_retired = 30
# 1.5% constant spending
const_spend_pct = 0.015
const_spend = startval * const_spend_pct
# var spending a function of years left
var_spend_pcts = pd.Series([ 1.0 / (years_retired - ix) - 0.01 for ix in range(years_retired)])
#Objective: 4.390120
const_spend = 1.494627
var_spend_pcts = pd.Series([0.026510001745962072, 0.027818217278890313, 0.028605532721252741, 0.028943515850045034, 0.029650425909075188, 0.030749598116744672, 0.031600262214435557, 0.032732508555050478, 0.034385383513833988, 0.036029103781616605, 0.03767831801390633, 0.039574695022857952, 0.04181956456859641, 0.043933810727326675, 0.046368133990928623, 0.049770890997431427, 0.053761145655487272, 0.058701327619542831, 0.064816641182696089, 0.072273502883599586, 0.081202909789127517, 0.0923868781223499, 0.10647268828242094, 0.1245451336773581, 0.14860396109790044, 0.18220604185509723, 0.23242068590691847, 0.31581923728426176, 0.48186646557743196, 0.98999999999999999])
start_alloc = 0.8
end_alloc = 0.5
# save starting scenario
pickle_list = [const_spend, var_spend_pcts, start_alloc, end_alloc]
pickle.dump( pickle_list, open( bestfile, "wb" ) )
# start with a learning rate that learns quickly, gradually reduce it
# run once with 50 or 100 steps to see which learning rates are effective
# then plug in that solution and run each til no improvement for a large number of steps
for learning_rate in [
#0.00001, # too coarse, may be NaN
0.00003, # too coarse, may be NaN
0.000001, # coarse
0.000003, # coarse
0.0000001, # workhorse
0.00000003,
0.00000001, # diminishing returns
#0.000000003,
#0.000000001, #superfine
#0.0000000003,
#0.0000000001,
#0.00000000001,
]:
cmdstr = './safewithdrawal_linearalloc.py %.12f %d %f %s' % (learning_rate, max_unimproved_steps, gamma, fileprefix)
print(cmdstr)
os.system(cmdstr)
| mit | -5,372,145,557,114,190,000 | 48.653465 | 683 | 0.771087 | false |
cemarchi/biosphere | Src/BioDataManagement/DataAccess/Entities/MessengerRnaSample.py | 1 | 1434 | from typing import List, Dict
from Src.BioDataManagement.DataAccess.Entities.BiologicalSampleBase import BiologicalSampleBase
from Src.BioDataManagement.DataAccess.Entities.GeneExpressionLevel import GeneExpressionLevel
class MessengerRnaSample(BiologicalSampleBase):
"""description of class"""
def __init__(self, **kargs):
"""
:param kargs:
"""
super().__init__(**kargs)
self.__exp_levels = kargs.get('exp_levels')
if self.__exp_levels:
self.__exp_levels = list(set([GeneExpressionLevel(**exp) for exp in self.__exp_levels]))
def __hash__(self):
return hash(self.patient_id)
def __eq__(self, other):
return isinstance(other, MessengerRnaSample) and \
self.patient_id == other.patient_id
@property
def exp_levels(self)-> List[GeneExpressionLevel]:
"""description of property"""
return self.__exp_levels[:]
@exp_levels.setter
def exp_levels(self, value: List):
"""
:param value:
:return:
"""
self.__exp_levels = list(set(value))
def validate(self):
super().validate()
for g in self.__exp_levels:
g.validate()
def as_dict(self)-> Dict:
sample_dict = super().as_dict()
sample_dict.update({'exp_levels': list(map(lambda exp: exp.as_dict(), self.__exp_levels))})
return sample_dict
| bsd-3-clause | 579,169,642,502,552,600 | 26.576923 | 100 | 0.603208 | false |
atumanov/ray | python/ray/rllib/optimizers/replay_buffer.py | 1 | 8536 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
import sys
from ray.rllib.optimizers.segment_tree import SumSegmentTree, MinSegmentTree
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.compression import unpack_if_needed
from ray.rllib.utils.window_stat import WindowStat
@DeveloperAPI
class ReplayBuffer(object):
@DeveloperAPI
def __init__(self, size):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
self._hit_count = np.zeros(size)
self._eviction_started = False
self._num_added = 0
self._num_sampled = 0
self._evicted_hit_stats = WindowStat("evicted_hit", 1000)
self._est_size_bytes = 0
def __len__(self):
return len(self._storage)
@DeveloperAPI
def add(self, obs_t, action, reward, obs_tp1, done, weight):
data = (obs_t, action, reward, obs_tp1, done)
self._num_added += 1
if self._next_idx >= len(self._storage):
self._storage.append(data)
self._est_size_bytes += sum(sys.getsizeof(d) for d in data)
else:
self._storage[self._next_idx] = data
if self._next_idx + 1 >= self._maxsize:
self._eviction_started = True
self._next_idx = (self._next_idx + 1) % self._maxsize
if self._eviction_started:
self._evicted_hit_stats.push(self._hit_count[self._next_idx])
self._hit_count[self._next_idx] = 0
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(unpack_if_needed(obs_t), copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(unpack_if_needed(obs_tp1), copy=False))
dones.append(done)
self._hit_count[i] += 1
return (np.array(obses_t), np.array(actions), np.array(rewards),
np.array(obses_tp1), np.array(dones))
@DeveloperAPI
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [
random.randint(0,
len(self._storage) - 1) for _ in range(batch_size)
]
self._num_sampled += batch_size
return self._encode_sample(idxes)
@DeveloperAPI
def stats(self, debug=False):
data = {
"added_count": self._num_added,
"sampled_count": self._num_sampled,
"est_size_bytes": self._est_size_bytes,
"num_entries": len(self._storage),
}
if debug:
data.update(self._evicted_hit_stats.stats())
return data
@DeveloperAPI
class PrioritizedReplayBuffer(ReplayBuffer):
@DeveloperAPI
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
self._prio_change_stats = WindowStat("reprio", 1000)
@DeveloperAPI
def add(self, obs_t, action, reward, obs_tp1, done, weight):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super(PrioritizedReplayBuffer, self).add(obs_t, action, reward,
obs_tp1, done, weight)
if weight is None:
weight = self._max_priority
self._it_sum[idx] = weight**self._alpha
self._it_min[idx] = weight**self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
# TODO(szymon): should we ensure no repeats?
mass = random.random() * self._it_sum.sum(0, len(self._storage))
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
@DeveloperAPI
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
self._num_sampled += batch_size
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage))**(-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage))**(-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
@DeveloperAPI
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
delta = priority**self._alpha - self._it_sum[idx]
self._prio_change_stats.push(delta)
self._it_sum[idx] = priority**self._alpha
self._it_min[idx] = priority**self._alpha
self._max_priority = max(self._max_priority, priority)
@DeveloperAPI
def stats(self, debug=False):
parent = ReplayBuffer.stats(self, debug)
if debug:
parent.update(self._prio_change_stats.stats())
return parent
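# Illustrative usage sketch (added for clarity, not part of the original
# module): fill a small prioritized buffer and draw an importance-weighted
# sample; the observation shape and priorities below are placeholders.
def _example_prioritized_usage():
    buf = PrioritizedReplayBuffer(size=1024, alpha=0.6)
    obs = np.zeros(4)
    for i in range(16):
        buf.add(obs, 0, float(i), obs, False, weight=None)
    obses_t, actions, rewards, obses_tp1, dones, weights, idxes = buf.sample(
        8, beta=0.4)
    # After recomputing TD errors, priorities can be refreshed:
    buf.update_priorities(idxes, np.abs(rewards) + 1e-6)
    return weights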
| apache-2.0 | -6,601,539,590,877,492,000 | 32.73913 | 77 | 0.575679 | false |
wesm/arrow | python/pyarrow/__init__.py | 1 | 20961 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# flake8: noqa
"""
PyArrow is the python implementation of Apache Arrow.
Apache Arrow is a cross-language development platform for in-memory data.
It specifies a standardized language-independent columnar memory format for
flat and hierarchical data, organized for efficient analytic operations on
modern hardware. It also provides computational libraries and zero-copy
streaming messaging and interprocess communication.
For more information see the official page at https://arrow.apache.org
"""
import gc as _gc
import os as _os
import sys as _sys
import warnings as _warnings
try:
from ._generated_version import version as __version__
except ImportError:
# Package is not installed, parse git tag at runtime
try:
import setuptools_scm
# Code duplicated from setup.py to avoid a dependency on each other
def parse_git(root, **kwargs):
"""
Parse function for setuptools_scm that ignores tags for non-C++
subprojects, e.g. apache-arrow-js-XXX tags.
"""
from setuptools_scm.git import parse
kwargs['describe_command'] = \
"git describe --dirty --tags --long --match 'apache-arrow-[0-9].*'"
return parse(root, **kwargs)
__version__ = setuptools_scm.get_version('../',
parse=parse_git)
except ImportError:
__version__ = None
# ARROW-8684: Disable GC while initializing Cython extension module,
# to workaround Cython bug in https://github.com/cython/cython/issues/3603
_gc_enabled = _gc.isenabled()
_gc.disable()
import pyarrow.lib as _lib
if _gc_enabled:
_gc.enable()
from pyarrow.lib import (BuildInfo, RuntimeInfo, VersionInfo,
cpp_build_info, cpp_version, cpp_version_info,
runtime_info, cpu_count, set_cpu_count,
enable_signal_handlers)
def show_versions():
"""
Print various version information, to help with error reporting.
"""
# TODO: CPU information and flags
print("pyarrow version info\n--------------------")
print("Package kind: {}".format(cpp_build_info.package_kind
if len(cpp_build_info.package_kind) > 0
else "not indicated"))
print("Arrow C++ library version: {0}".format(cpp_build_info.version))
print("Arrow C++ compiler: {0} {1}"
.format(cpp_build_info.compiler_id, cpp_build_info.compiler_version))
print("Arrow C++ compiler flags: {0}"
.format(cpp_build_info.compiler_flags))
print("Arrow C++ git revision: {0}".format(cpp_build_info.git_id))
print("Arrow C++ git description: {0}"
.format(cpp_build_info.git_description))
from pyarrow.lib import (null, bool_,
int8, int16, int32, int64,
uint8, uint16, uint32, uint64,
time32, time64, timestamp, date32, date64, duration,
float16, float32, float64,
binary, string, utf8,
large_binary, large_string, large_utf8,
decimal128, decimal256,
list_, large_list, map_, struct,
union, sparse_union, dense_union,
dictionary,
field,
type_for_alias,
DataType, DictionaryType, StructType,
ListType, LargeListType, MapType, FixedSizeListType,
UnionType, SparseUnionType, DenseUnionType,
TimestampType, Time32Type, Time64Type, DurationType,
FixedSizeBinaryType, Decimal128Type, Decimal256Type,
BaseExtensionType, ExtensionType,
PyExtensionType, UnknownExtensionType,
register_extension_type, unregister_extension_type,
DictionaryMemo,
KeyValueMetadata,
Field,
Schema,
schema,
unify_schemas,
Array, Tensor,
array, chunked_array, record_batch, nulls, repeat,
SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix,
SparseCSFTensor,
infer_type, from_numpy_dtype,
NullArray,
NumericArray, IntegerArray, FloatingPointArray,
BooleanArray,
Int8Array, UInt8Array,
Int16Array, UInt16Array,
Int32Array, UInt32Array,
Int64Array, UInt64Array,
ListArray, LargeListArray, MapArray,
FixedSizeListArray, UnionArray,
BinaryArray, StringArray,
LargeBinaryArray, LargeStringArray,
FixedSizeBinaryArray,
DictionaryArray,
Date32Array, Date64Array, TimestampArray,
Time32Array, Time64Array, DurationArray,
Decimal128Array, Decimal256Array, StructArray, ExtensionArray,
scalar, NA, _NULL as NULL, Scalar,
NullScalar, BooleanScalar,
Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar,
UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar,
HalfFloatScalar, FloatScalar, DoubleScalar,
Decimal128Scalar, Decimal256Scalar,
ListScalar, LargeListScalar, FixedSizeListScalar,
Date32Scalar, Date64Scalar,
Time32Scalar, Time64Scalar,
BinaryScalar, LargeBinaryScalar,
StringScalar, LargeStringScalar,
FixedSizeBinaryScalar, DictionaryScalar,
MapScalar, UnionScalar, StructScalar,
TimestampScalar, DurationScalar)
# Buffers, allocation
from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
Codec, compress, decompress, allocate_buffer)
from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool,
total_allocated_bytes, set_memory_pool,
default_memory_pool, system_memory_pool,
jemalloc_memory_pool, mimalloc_memory_pool,
logging_memory_pool, proxy_memory_pool,
log_memory_allocations, jemalloc_set_decay_ms)
# I/O
from pyarrow.lib import (HdfsFile, NativeFile, PythonFile,
BufferedInputStream, BufferedOutputStream,
CompressedInputStream, CompressedOutputStream,
TransformInputStream, transcoding_input_stream,
FixedSizeBufferWriter,
BufferReader, BufferOutputStream,
OSFile, MemoryMappedFile, memory_map,
create_memory_map, have_libhdfs,
MockOutputStream, input_stream, output_stream)
from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table,
concat_arrays, concat_tables)
# Exceptions
from pyarrow.lib import (ArrowCancelled,
ArrowCapacityError,
ArrowException,
ArrowKeyError,
ArrowIndexError,
ArrowInvalid,
ArrowIOError,
ArrowMemoryError,
ArrowNotImplementedError,
ArrowTypeError,
ArrowSerializationError)
# Serialization
from pyarrow.lib import (deserialize_from, deserialize,
deserialize_components,
serialize, serialize_to, read_serialized,
SerializationCallbackError,
DeserializationCallbackError)
import pyarrow.hdfs as hdfs
from pyarrow.ipc import serialize_pandas, deserialize_pandas
import pyarrow.ipc as ipc
from pyarrow.serialization import (default_serialization_context,
register_default_serialization_handlers,
register_torch_serialization_handlers)
import pyarrow.types as types
# deprecated top-level access
from pyarrow.filesystem import FileSystem as _FileSystem
from pyarrow.filesystem import LocalFileSystem as _LocalFileSystem
from pyarrow.hdfs import HadoopFileSystem as _HadoopFileSystem
from pyarrow.lib import SerializationContext as _SerializationContext
from pyarrow.lib import SerializedPyObject as _SerializedPyObject
_localfs = _LocalFileSystem._get_instance()
_msg = (
"pyarrow.{0} is deprecated as of 2.0.0, please use pyarrow.fs.{1} instead."
)
_serialization_msg = (
"'pyarrow.{0}' is deprecated and will be removed in a future version. "
"Use pickle or the pyarrow IPC functionality instead."
)
_deprecated = {
"localfs": (_localfs, "LocalFileSystem"),
"FileSystem": (_FileSystem, "FileSystem"),
"LocalFileSystem": (_LocalFileSystem, "LocalFileSystem"),
"HadoopFileSystem": (_HadoopFileSystem, "HadoopFileSystem"),
}
_serialization_deprecated = {
"SerializationContext": _SerializationContext,
"SerializedPyObject": _SerializedPyObject,
}
if _sys.version_info >= (3, 7):
def __getattr__(name):
if name in _deprecated:
obj, new_name = _deprecated[name]
_warnings.warn(_msg.format(name, new_name),
FutureWarning, stacklevel=2)
return obj
        elif name in _serialization_deprecated:
            _warnings.warn(_serialization_msg.format(name),
                           FutureWarning, stacklevel=2)
            return _serialization_deprecated[name]
raise AttributeError(
"module 'pyarrow' has no attribute '{0}'".format(name)
)
else:
localfs = _localfs
FileSystem = _FileSystem
LocalFileSystem = _LocalFileSystem
HadoopFileSystem = _HadoopFileSystem
SerializationContext = _SerializationContext
SerializedPyObject = _SerializedPyObject
# Entry point for starting the plasma store
def _plasma_store_entry_point():
"""Entry point for starting the plasma store.
This can be used by invoking e.g.
``plasma_store -s /tmp/plasma -m 1000000000``
from the command line and will start the plasma_store executable with the
given arguments.
"""
import pyarrow
plasma_store_executable = _os.path.join(pyarrow.__path__[0],
"plasma-store-server")
_os.execv(plasma_store_executable, _sys.argv)
# ----------------------------------------------------------------------
# Deprecations
from pyarrow.util import _deprecate_api, _deprecate_class
read_message = _deprecate_api("read_message", "ipc.read_message",
ipc.read_message, "0.17.0")
read_record_batch = _deprecate_api("read_record_batch",
"ipc.read_record_batch",
ipc.read_record_batch, "0.17.0")
read_schema = _deprecate_api("read_schema", "ipc.read_schema",
ipc.read_schema, "0.17.0")
read_tensor = _deprecate_api("read_tensor", "ipc.read_tensor",
ipc.read_tensor, "0.17.0")
write_tensor = _deprecate_api("write_tensor", "ipc.write_tensor",
ipc.write_tensor, "0.17.0")
get_record_batch_size = _deprecate_api("get_record_batch_size",
"ipc.get_record_batch_size",
ipc.get_record_batch_size, "0.17.0")
get_tensor_size = _deprecate_api("get_tensor_size",
"ipc.get_tensor_size",
ipc.get_tensor_size, "0.17.0")
open_stream = _deprecate_api("open_stream", "ipc.open_stream",
ipc.open_stream, "0.17.0")
open_file = _deprecate_api("open_file", "ipc.open_file", ipc.open_file,
"0.17.0")
def _deprecate_scalar(ty, symbol):
return _deprecate_class("{}Value".format(ty), symbol, "1.0.0")
ArrayValue = _deprecate_class("ArrayValue", Scalar, "1.0.0")
NullType = _deprecate_class("NullType", NullScalar, "1.0.0")
BooleanValue = _deprecate_scalar("Boolean", BooleanScalar)
Int8Value = _deprecate_scalar("Int8", Int8Scalar)
Int16Value = _deprecate_scalar("Int16", Int16Scalar)
Int32Value = _deprecate_scalar("Int32", Int32Scalar)
Int64Value = _deprecate_scalar("Int64", Int64Scalar)
UInt8Value = _deprecate_scalar("UInt8", UInt8Scalar)
UInt16Value = _deprecate_scalar("UInt16", UInt16Scalar)
UInt32Value = _deprecate_scalar("UInt32", UInt32Scalar)
UInt64Value = _deprecate_scalar("UInt64", UInt64Scalar)
HalfFloatValue = _deprecate_scalar("HalfFloat", HalfFloatScalar)
FloatValue = _deprecate_scalar("Float", FloatScalar)
DoubleValue = _deprecate_scalar("Double", DoubleScalar)
ListValue = _deprecate_scalar("List", ListScalar)
LargeListValue = _deprecate_scalar("LargeList", LargeListScalar)
MapValue = _deprecate_scalar("Map", MapScalar)
FixedSizeListValue = _deprecate_scalar("FixedSizeList", FixedSizeListScalar)
BinaryValue = _deprecate_scalar("Binary", BinaryScalar)
StringValue = _deprecate_scalar("String", StringScalar)
LargeBinaryValue = _deprecate_scalar("LargeBinary", LargeBinaryScalar)
LargeStringValue = _deprecate_scalar("LargeString", LargeStringScalar)
FixedSizeBinaryValue = _deprecate_scalar("FixedSizeBinary",
FixedSizeBinaryScalar)
Decimal128Value = _deprecate_scalar("Decimal128", Decimal128Scalar)
Decimal256Value = _deprecate_scalar("Decimal256", Decimal256Scalar)
UnionValue = _deprecate_scalar("Union", UnionScalar)
StructValue = _deprecate_scalar("Struct", StructScalar)
DictionaryValue = _deprecate_scalar("Dictionary", DictionaryScalar)
Date32Value = _deprecate_scalar("Date32", Date32Scalar)
Date64Value = _deprecate_scalar("Date64", Date64Scalar)
Time32Value = _deprecate_scalar("Time32", Time32Scalar)
Time64Value = _deprecate_scalar("Time64", Time64Scalar)
TimestampValue = _deprecate_scalar("Timestamp", TimestampScalar)
DurationValue = _deprecate_scalar("Duration", DurationScalar)
# TODO: Deprecate these somehow in the pyarrow namespace
from pyarrow.ipc import (Message, MessageReader, MetadataVersion,
RecordBatchFileReader, RecordBatchFileWriter,
RecordBatchStreamReader, RecordBatchStreamWriter)
# ----------------------------------------------------------------------
# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
# wheels)
def get_include():
"""
Return absolute path to directory containing Arrow C++ include
headers. Similar to numpy.get_include
"""
return _os.path.join(_os.path.dirname(__file__), 'include')
def _get_pkg_config_executable():
return _os.environ.get('PKG_CONFIG', 'pkg-config')
def _has_pkg_config(pkgname):
import subprocess
try:
return subprocess.call([_get_pkg_config_executable(),
'--exists', pkgname]) == 0
except FileNotFoundError:
return False
def _read_pkg_config_variable(pkgname, cli_args):
import subprocess
cmd = [_get_pkg_config_executable(), pkgname] + cli_args
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("pkg-config failed: " + err.decode('utf8'))
return out.rstrip().decode('utf8')
def get_libraries():
"""
Return list of library names to include in the `libraries` argument for C
or Cython extensions using pyarrow
"""
return ['arrow', 'arrow_python']
def create_library_symlinks():
"""
With Linux and macOS wheels, the bundled shared libraries have an embedded
ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them
with -larrow won't work unless we create symlinks at locations like
site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses
prior problems we had with shipping two copies of the shared libraries to
permit third party projects like turbodbc to build their C++ extensions
against the pyarrow wheels.
This function must only be invoked once and only when the shared libraries
are bundled with the Python package, which should only apply to wheel-based
installs. It requires write access to the site-packages/pyarrow directory
and so depending on your system may need to be run with root.
"""
import glob
if _sys.platform == 'win32':
return
package_cwd = _os.path.dirname(__file__)
if _sys.platform == 'linux':
bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*'))
def get_symlink_path(hard_path):
return hard_path.rsplit('.', 1)[0]
else:
bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib'))
def get_symlink_path(hard_path):
return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib'))
for lib_hard_path in bundled_libs:
symlink_path = get_symlink_path(lib_hard_path)
if _os.path.exists(symlink_path):
continue
try:
_os.symlink(lib_hard_path, symlink_path)
except PermissionError:
print("Tried creating symlink {}. If you need to link to "
"bundled shared libraries, run "
"pyarrow.create_library_symlinks() as root")
def get_library_dirs():
"""
Return lists of directories likely to contain Arrow C++ libraries for
linking C or Cython extensions using pyarrow
"""
package_cwd = _os.path.dirname(__file__)
library_dirs = [package_cwd]
def append_library_dir(library_dir):
if library_dir not in library_dirs:
library_dirs.append(library_dir)
# Search library paths via pkg-config. This is necessary if the user
# installed libarrow and the other shared libraries manually and they
# are not shipped inside the pyarrow package (see also ARROW-2976).
pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config'
for pkgname in ["arrow", "arrow_python"]:
if _has_pkg_config(pkgname):
library_dir = _read_pkg_config_variable(pkgname,
["--libs-only-L"])
# pkg-config output could be empty if Arrow is installed
# as a system package.
if library_dir:
if not library_dir.startswith("-L"):
raise ValueError(
"pkg-config --libs-only-L returned unexpected "
"value {!r}".format(library_dir))
append_library_dir(library_dir[2:])
if _sys.platform == 'win32':
# TODO(wesm): Is this necessary, or does setuptools within a conda
# installation add Library\lib to the linker path for MSVC?
python_base_install = _os.path.dirname(_sys.executable)
library_dir = _os.path.join(python_base_install, 'Library', 'lib')
if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')):
append_library_dir(library_dir)
# ARROW-4074: Allow for ARROW_HOME to be set to some other directory
if _os.environ.get('ARROW_HOME'):
append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib'))
else:
# Python wheels bundle the Arrow libraries in the pyarrow directory.
append_library_dir(_os.path.dirname(_os.path.abspath(__file__)))
return library_dirs
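# Illustrative sketch (added for clarity, not part of the original module):
# how the helpers above are typically consumed from a setup.py that builds a
# C or Cython extension against pyarrow; the extension itself is hypothetical.
def _example_extension_kwargs():
    return dict(
        include_dirs=[get_include()],
        libraries=get_libraries(),
        library_dirs=get_library_dirs())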
| apache-2.0 | 4,812,145,373,778,390,000 | 40.589286 | 87 | 0.608606 | false |
shreyasp/erpnext | erpnext/controllers/accounts_controller.py | 1 | 28222 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.utils import today, flt, cint, fmt_money, formatdate, getdate
from erpnext.setup.utils import get_company_currency, get_exchange_rate
from erpnext.accounts.utils import get_fiscal_years, validate_fiscal_year, get_account_currency
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.controllers.recurring_document import convert_to_recurring, validate_recurring_document
from erpnext.controllers.sales_and_purchase_return import validate_return
from erpnext.accounts.party import get_party_account_currency, validate_party_frozen_disabled
from erpnext.exceptions import InvalidCurrency
force_item_fields = ("item_group", "barcode", "brand", "stock_uom")
class AccountsController(TransactionBase):
def __init__(self, arg1, arg2=None):
super(AccountsController, self).__init__(arg1, arg2)
@property
def company_currency(self):
if not hasattr(self, "__company_currency"):
self.__company_currency = get_company_currency(self.company)
return self.__company_currency
def onload(self):
self.get("__onload").make_payment_via_journal_entry = frappe.db.get_single_value('Accounts Settings', 'make_payment_via_journal_entry')
def validate(self):
if self.get("_action") and self._action != "update_after_submit":
self.set_missing_values(for_validate=True)
self.validate_date_with_fiscal_year()
if self.meta.get_field("currency"):
self.calculate_taxes_and_totals()
if not self.meta.get_field("is_return") or not self.is_return:
self.validate_value("base_grand_total", ">=", 0)
validate_return(self)
self.set_total_in_words()
if self.doctype in ("Sales Invoice", "Purchase Invoice") and not self.is_return:
self.validate_due_date()
self.validate_advance_entries()
if self.meta.get_field("taxes_and_charges"):
self.validate_enabled_taxes_and_charges()
self.validate_party()
self.validate_currency()
if self.meta.get_field("is_recurring"):
if self.amended_from and self.recurring_id:
self.recurring_id = None
if not self.get("__islocal"):
validate_recurring_document(self)
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
if self.doctype == 'Purchase Invoice':
self.validate_paid_amount()
def validate_paid_amount(self):
if hasattr(self, "is_pos") or hasattr(self, "is_paid"):
is_paid = self.get("is_pos") or self.get("is_paid")
if cint(is_paid) == 1:
if flt(self.paid_amount) == 0 and flt(self.outstanding_amount) > 0:
if self.cash_bank_account:
self.paid_amount = flt(flt(self.grand_total) - flt(self.write_off_amount),
self.precision("paid_amount"))
self.base_paid_amount = flt(self.paid_amount * self.conversion_rate, self.precision("base_paid_amount"))
else:
# show message that the amount is not paid
self.paid_amount = 0
frappe.throw(_("Note: Payment Entry will not be created since 'Cash or Bank Account' was not specified"))
else:
frappe.db.set(self,'paid_amount',0)
def on_update_after_submit(self):
if self.meta.get_field("is_recurring"):
validate_recurring_document(self)
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
def set_missing_values(self, for_validate=False):
if frappe.flags.in_test:
for fieldname in ["posting_date","transaction_date"]:
if self.meta.get_field(fieldname) and not self.get(fieldname):
self.set(fieldname, today())
break
def calculate_taxes_and_totals(self):
from erpnext.controllers.taxes_and_totals import calculate_taxes_and_totals
calculate_taxes_and_totals(self)
if self.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
self.calculate_commission()
self.calculate_contribution()
def validate_date_with_fiscal_year(self):
if self.meta.get_field("fiscal_year") :
date_field = ""
if self.meta.get_field("posting_date"):
date_field = "posting_date"
elif self.meta.get_field("transaction_date"):
date_field = "transaction_date"
if date_field and self.get(date_field):
validate_fiscal_year(self.get(date_field), self.fiscal_year,
self.meta.get_label(date_field), self)
def validate_due_date(self):
from erpnext.accounts.party import validate_due_date
if self.doctype == "Sales Invoice":
if not self.due_date:
frappe.throw(_("Due Date is mandatory"))
validate_due_date(self.posting_date, self.due_date, "Customer", self.customer, self.company)
elif self.doctype == "Purchase Invoice":
validate_due_date(self.posting_date, self.due_date, "Supplier", self.supplier, self.company)
def set_price_list_currency(self, buying_or_selling):
if self.meta.get_field("currency"):
# price list part
fieldname = "selling_price_list" if buying_or_selling.lower() == "selling" \
else "buying_price_list"
if self.meta.get_field(fieldname) and self.get(fieldname):
self.price_list_currency = frappe.db.get_value("Price List",
self.get(fieldname), "currency")
if self.price_list_currency == self.company_currency:
self.plc_conversion_rate = 1.0
elif not self.plc_conversion_rate:
self.plc_conversion_rate = get_exchange_rate(
self.price_list_currency, self.company_currency)
# currency
if not self.currency:
self.currency = self.price_list_currency
self.conversion_rate = self.plc_conversion_rate
elif self.currency == self.company_currency:
self.conversion_rate = 1.0
elif not self.conversion_rate:
self.conversion_rate = get_exchange_rate(self.currency,
self.company_currency)
def set_missing_item_details(self, for_validate=False):
"""set missing item values"""
from erpnext.stock.get_item_details import get_item_details
if hasattr(self, "items"):
parent_dict = {}
for fieldname in self.meta.get_valid_columns():
parent_dict[fieldname] = self.get(fieldname)
if self.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
document_type = "{} Item".format(self.doctype)
parent_dict.update({"document_type": document_type})
for item in self.get("items"):
if item.get("item_code"):
args = parent_dict.copy()
args.update(item.as_dict())
args["doctype"] = self.doctype
args["name"] = self.name
if not args.get("transaction_date"):
args["transaction_date"] = args.get("posting_date")
if self.get("is_subcontracted"):
args["is_subcontracted"] = self.is_subcontracted
ret = get_item_details(args)
for fieldname, value in ret.items():
if item.meta.get_field(fieldname) and value is not None:
if (item.get(fieldname) is None or fieldname in force_item_fields):
item.set(fieldname, value)
elif fieldname == "cost_center" and not item.get("cost_center"):
item.set(fieldname, value)
elif fieldname == "conversion_factor" and not item.get("conversion_factor"):
item.set(fieldname, value)
if ret.get("pricing_rule"):
# if user changed the discount percentage then set user's discount percentage ?
item.set("discount_percentage", ret.get("discount_percentage"))
if ret.get("pricing_rule_for") == "Price":
item.set("pricing_list_rate", ret.get("pricing_list_rate"))
if item.price_list_rate:
item.rate = flt(item.price_list_rate *
(1.0 - (flt(item.discount_percentage) / 100.0)), item.precision("rate"))
if self.doctype == "Purchase Invoice":
self.set_expense_account(for_validate)
def set_taxes(self):
if not self.meta.get_field("taxes"):
return
tax_master_doctype = self.meta.get_field("taxes_and_charges").options
if not self.get("taxes"):
if not self.get("taxes_and_charges"):
# get the default tax master
self.set("taxes_and_charges", frappe.db.get_value(tax_master_doctype, {"is_default": 1}))
self.append_taxes_from_master(tax_master_doctype)
def append_taxes_from_master(self, tax_master_doctype=None):
if self.get("taxes_and_charges"):
if not tax_master_doctype:
tax_master_doctype = self.meta.get_field("taxes_and_charges").options
self.extend("taxes", get_taxes_and_charges(tax_master_doctype, self.get("taxes_and_charges")))
def set_other_charges(self):
self.set("taxes", [])
self.set_taxes()
def validate_enabled_taxes_and_charges(self):
taxes_and_charges_doctype = self.meta.get_options("taxes_and_charges")
if frappe.db.get_value(taxes_and_charges_doctype, self.taxes_and_charges, "disabled"):
frappe.throw(_("{0} '{1}' is disabled").format(taxes_and_charges_doctype, self.taxes_and_charges))
def get_gl_dict(self, args, account_currency=None):
"""this method populates the common properties of a gl entry record"""
fiscal_years = get_fiscal_years(self.posting_date, company=self.company)
if len(fiscal_years) > 1:
frappe.throw(_("Multiple fiscal years exist for the date {0}. Please set company in Fiscal Year").format(formatdate(self.posting_date)))
else:
fiscal_year = fiscal_years[0][0]
gl_dict = frappe._dict({
'company': self.company,
'posting_date': self.posting_date,
'fiscal_year': fiscal_year,
'voucher_type': self.doctype,
'voucher_no': self.name,
'remarks': self.get("remarks"),
'debit': 0,
'credit': 0,
'debit_in_account_currency': 0,
'credit_in_account_currency': 0,
'is_opening': self.get("is_opening") or "No",
'party_type': None,
'party': None,
'project': self.get("project")
})
gl_dict.update(args)
if not account_currency:
account_currency = get_account_currency(gl_dict.account)
if self.doctype not in ["Journal Entry", "Period Closing Voucher", "Payment Entry"]:
self.validate_account_currency(gl_dict.account, account_currency)
set_balance_in_account_currency(gl_dict, account_currency, self.get("conversion_rate"), self.company_currency)
return gl_dict
def validate_account_currency(self, account, account_currency=None):
valid_currency = [self.company_currency]
if self.get("currency") and self.currency != self.company_currency:
valid_currency.append(self.currency)
if account_currency not in valid_currency:
frappe.throw(_("Account {0} is invalid. Account Currency must be {1}")
.format(account, _(" or ").join(valid_currency)))
def clear_unallocated_advances(self, childtype, parentfield):
self.set(parentfield, self.get(parentfield, {"allocated_amount": ["not in", [0, None, ""]]}))
frappe.db.sql("""delete from `tab%s` where parentfield=%s and parent = %s
and allocated_amount = 0""" % (childtype, '%s', '%s'), (parentfield, self.name))
def set_advances(self):
"""Returns list of advances against Account, Party, Reference"""
res = self.get_advance_entries()
self.set("advances", [])
for d in res:
self.append("advances", {
"doctype": self.doctype + " Advance",
"reference_type": d.reference_type,
"reference_name": d.reference_name,
"reference_row": d.reference_row,
"remarks": d.remarks,
"advance_amount": flt(d.amount),
"allocated_amount": flt(d.amount) if d.against_order else 0
})
def get_advance_entries(self, include_unallocated=True):
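		# Collect advance Journal Entries and Payment Entries booked against this
		# party account, optionally restricted to the orders referenced by the items.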
if self.doctype == "Sales Invoice":
party_account = self.debit_to
party_type = "Customer"
party = self.customer
amount_field = "credit_in_account_currency"
order_field = "sales_order"
order_doctype = "Sales Order"
else:
party_account = self.credit_to
party_type = "Supplier"
party = self.supplier
amount_field = "debit_in_account_currency"
order_field = "purchase_order"
order_doctype = "Purchase Order"
order_list = list(set([d.get(order_field)
for d in self.get("items") if d.get(order_field)]))
journal_entries = get_advance_journal_entries(party_type, party, party_account,
amount_field, order_doctype, order_list, include_unallocated)
payment_entries = get_advance_payment_entries(party_type, party, party_account,
order_doctype, order_list, include_unallocated)
res = journal_entries + payment_entries
return res
def validate_advance_entries(self):
order_field = "sales_order" if self.doctype == "Sales Invoice" else "purchase_order"
order_list = list(set([d.get(order_field)
for d in self.get("items") if d.get(order_field)]))
if not order_list: return
advance_entries = self.get_advance_entries(include_unallocated=False)
if advance_entries:
advance_entries_against_si = [d.reference_name for d in self.get("advances")]
for d in advance_entries:
if not advance_entries_against_si or d.reference_name not in advance_entries_against_si:
frappe.msgprint(_("Payment Entry {0} is linked against Order {1}, check if it should be pulled as advance in this invoice.")
.format(d.reference_name, d.against_order))
def update_against_document_in_jv(self):
"""
Links invoice and advance voucher:
1. cancel advance voucher
2. split into multiple rows if partially adjusted, assign against voucher
3. submit advance voucher
"""
if self.doctype == "Sales Invoice":
party_type = "Customer"
party = self.customer
party_account = self.debit_to
dr_or_cr = "credit_in_account_currency"
else:
party_type = "Supplier"
party = self.supplier
party_account = self.credit_to
dr_or_cr = "debit_in_account_currency"
lst = []
for d in self.get('advances'):
if flt(d.allocated_amount) > 0:
args = frappe._dict({
'voucher_type': d.reference_type,
'voucher_no' : d.reference_name,
'voucher_detail_no' : d.reference_row,
'against_voucher_type' : self.doctype,
'against_voucher' : self.name,
'account' : party_account,
'party_type': party_type,
'party': party,
'is_advance' : 'Yes',
'dr_or_cr' : dr_or_cr,
'unadjusted_amount' : flt(d.advance_amount),
'allocated_amount' : flt(d.allocated_amount),
'exchange_rate': (self.conversion_rate
if self.party_account_currency != self.company_currency else 1),
'grand_total': (self.base_grand_total
if self.party_account_currency==self.company_currency else self.grand_total),
'outstanding_amount': self.outstanding_amount
})
lst.append(args)
if lst:
from erpnext.accounts.utils import reconcile_against_document
reconcile_against_document(lst)
def validate_multiple_billing(self, ref_dt, item_ref_dn, based_on, parentfield):
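		# Guard against over-billing: the amount already billed against each referenced
		# row may not exceed the reference amount plus the allowed tolerance.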
from erpnext.controllers.status_updater import get_tolerance_for
item_tolerance = {}
global_tolerance = None
for item in self.get("items"):
if item.get(item_ref_dn):
ref_amt = flt(frappe.db.get_value(ref_dt + " Item",
item.get(item_ref_dn), based_on), self.precision(based_on, item))
if not ref_amt:
frappe.msgprint(_("Warning: System will not check overbilling since amount for Item {0} in {1} is zero").format(item.item_code, ref_dt))
else:
already_billed = frappe.db.sql("""select sum(%s) from `tab%s`
where %s=%s and docstatus=1 and parent != %s""" %
(based_on, self.doctype + " Item", item_ref_dn, '%s', '%s'),
(item.get(item_ref_dn), self.name))[0][0]
total_billed_amt = flt(flt(already_billed) + flt(item.get(based_on)),
self.precision(based_on, item))
tolerance, item_tolerance, global_tolerance = get_tolerance_for(item.item_code,
item_tolerance, global_tolerance)
max_allowed_amt = flt(ref_amt * (100 + tolerance) / 100)
if total_billed_amt - max_allowed_amt > 0.01:
frappe.throw(_("Cannot overbill for Item {0} in row {1} more than {2}. To allow overbilling, please set in Stock Settings").format(item.item_code, item.idx, max_allowed_amt))
def get_company_default(self, fieldname):
from erpnext.accounts.utils import get_company_default
return get_company_default(self.company, fieldname)
def get_stock_items(self):
stock_items = []
item_codes = list(set(item.item_code for item in self.get("items")))
if item_codes:
stock_items = [r[0] for r in frappe.db.sql("""select name
from `tabItem` where name in (%s) and is_stock_item=1""" % \
(", ".join((["%s"]*len(item_codes))),), item_codes)]
return stock_items
def set_total_advance_paid(self):
if self.doctype == "Sales Order":
dr_or_cr = "credit_in_account_currency"
party = self.customer
else:
dr_or_cr = "debit_in_account_currency"
party = self.supplier
advance = frappe.db.sql("""
select
account_currency, sum({dr_or_cr}) as amount
from
`tabGL Entry`
where
against_voucher_type = %s and against_voucher = %s and party=%s
and docstatus = 1
""".format(dr_or_cr=dr_or_cr), (self.doctype, self.name, party), as_dict=1)
if advance:
advance = advance[0]
advance_paid = flt(advance.amount, self.precision("advance_paid"))
formatted_advance_paid = fmt_money(advance_paid, precision=self.precision("advance_paid"),
currency=advance.account_currency)
frappe.db.set_value(self.doctype, self.name, "party_account_currency",
advance.account_currency)
if advance.account_currency == self.currency:
order_total = self.grand_total
formatted_order_total = fmt_money(order_total, precision=self.precision("grand_total"),
currency=advance.account_currency)
else:
order_total = self.base_grand_total
formatted_order_total = fmt_money(order_total, precision=self.precision("base_grand_total"),
currency=advance.account_currency)
if self.currency == self.company_currency and advance_paid > order_total:
frappe.throw(_("Total advance ({0}) against Order {1} cannot be greater than the Grand Total ({2})")
.format(formatted_advance_paid, self.name, formatted_order_total))
frappe.db.set_value(self.doctype, self.name, "advance_paid", advance_paid)
@property
def company_abbr(self):
if not hasattr(self, "_abbr"):
self._abbr = frappe.db.get_value("Company", self.company, "abbr")
return self._abbr
def validate_party(self):
party_type, party = self.get_party()
validate_party_frozen_disabled(party_type, party)
def get_party(self):
party_type = None
if self.doctype in ("Opportunity", "Quotation", "Sales Order", "Delivery Note", "Sales Invoice"):
party_type = 'Customer'
elif self.doctype in ("Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"):
party_type = 'Supplier'
elif self.meta.get_field("customer"):
party_type = "Customer"
elif self.meta.get_field("supplier"):
party_type = "Supplier"
party = self.get(party_type.lower()) if party_type else None
return party_type, party
def validate_currency(self):
if self.get("currency"):
party_type, party = self.get_party()
if party_type and party:
party_account_currency = get_party_account_currency(party_type, party, self.company)
if (party_account_currency
and party_account_currency != self.company_currency
and self.currency != party_account_currency):
frappe.throw(_("Accounting Entry for {0}: {1} can only be made in currency: {2}")
.format(party_type, party, party_account_currency), InvalidCurrency)
# Note: not validating with gle account because we don't have the account
# at quotation / sales order level and we shouldn't stop someone
# from creating a sales invoice if sales order is already created
def validate_fixed_asset(self):
for d in self.get("items"):
if d.is_fixed_asset:
if d.qty > 1:
frappe.throw(_("Row #{0}: Qty must be 1, as item is a fixed asset. Please use separate row for multiple qty.").format(d.idx))
if d.meta.get_field("asset"):
if not d.asset:
frappe.throw(_("Row #{0}: Asset is mandatory for fixed asset purchase/sale")
.format(d.idx))
else:
asset = frappe.get_doc("Asset", d.asset)
if asset.company != self.company:
frappe.throw(_("Row #{0}: Asset {1} does not belong to company {2}")
.format(d.idx, d.asset, self.company))
elif asset.item_code != d.item_code:
frappe.throw(_("Row #{0}: Asset {1} does not linked to Item {2}")
.format(d.idx, d.asset, d.item_code))
elif asset.docstatus != 1:
frappe.throw(_("Row #{0}: Asset {1} must be submitted").format(d.idx, d.asset))
elif self.doctype == "Purchase Invoice":
if asset.status != "Submitted":
frappe.throw(_("Row #{0}: Asset {1} is already {2}")
.format(d.idx, d.asset, asset.status))
elif getdate(asset.purchase_date) != getdate(self.posting_date):
frappe.throw(_("Row #{0}: Posting Date must be same as purchase date {1} of asset {2}").format(d.idx, asset.purchase_date, d.asset))
elif asset.is_existing_asset:
frappe.throw(_("Row #{0}: Purchase Invoice cannot be made against an existing asset {1}").format(d.idx, d.asset))
elif self.docstatus=="Sales Invoice" and self.docstatus == 1:
if self.update_stock:
frappe.throw(_("'Update Stock' cannot be checked for fixed asset sale"))
elif asset.status in ("Scrapped", "Cancelled", "Sold"):
frappe.throw(_("Row #{0}: Asset {1} cannot be submitted, it is already {2}")
.format(d.idx, d.asset, asset.status))
@frappe.whitelist()
def get_tax_rate(account_head):
return frappe.db.get_value("Account", account_head, ["tax_rate", "account_name"], as_dict=True)
@frappe.whitelist()
def get_default_taxes_and_charges(master_doctype):
default_tax = frappe.db.get_value(master_doctype, {"is_default": 1})
return get_taxes_and_charges(master_doctype, default_tax)
@frappe.whitelist()
def get_taxes_and_charges(master_doctype, master_name):
if not master_name:
return
from frappe.model import default_fields
tax_master = frappe.get_doc(master_doctype, master_name)
taxes_and_charges = []
for i, tax in enumerate(tax_master.get("taxes")):
tax = tax.as_dict()
for fieldname in default_fields:
if fieldname in tax:
del tax[fieldname]
taxes_and_charges.append(tax)
return taxes_and_charges
def validate_conversion_rate(currency, conversion_rate, conversion_rate_label, company):
"""common validation for currency and price list currency"""
company_currency = frappe.db.get_value("Company", company, "default_currency", cache=True)
if not conversion_rate:
throw(_("{0} is mandatory. Maybe Currency Exchange record is not created for {1} to {2}.").format(
conversion_rate_label, currency, company_currency))
def validate_taxes_and_charges(tax):
if tax.charge_type in ['Actual', 'On Net Total'] and tax.row_id:
frappe.throw(_("Can refer row only if the charge type is 'On Previous Row Amount' or 'Previous Row Total'"))
elif tax.charge_type in ['On Previous Row Amount', 'On Previous Row Total']:
if cint(tax.idx) == 1:
frappe.throw(_("Cannot select charge type as 'On Previous Row Amount' or 'On Previous Row Total' for first row"))
elif not tax.row_id:
frappe.throw(_("Please specify a valid Row ID for row {0} in table {1}".format(tax.idx, _(tax.doctype))))
elif tax.row_id and cint(tax.row_id) >= cint(tax.idx):
frappe.throw(_("Cannot refer row number greater than or equal to current row number for this Charge type"))
if tax.charge_type == "Actual":
tax.rate = None
def validate_inclusive_tax(tax, doc):
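	# Taxes flagged as "included in print rate" must form a consistent chain:
	# a row may only be inclusive if every previous row it refers to is inclusive too.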
def _on_previous_row_error(row_range):
throw(_("To include tax in row {0} in Item rate, taxes in rows {1} must also be included").format(tax.idx,
row_range))
if cint(getattr(tax, "included_in_print_rate", None)):
if tax.charge_type == "Actual":
# inclusive tax cannot be of type Actual
throw(_("Charge of type 'Actual' in row {0} cannot be included in Item Rate").format(tax.idx))
elif tax.charge_type == "On Previous Row Amount" and \
not cint(doc.get("taxes")[cint(tax.row_id) - 1].included_in_print_rate):
# referred row should also be inclusive
_on_previous_row_error(tax.row_id)
elif tax.charge_type == "On Previous Row Total" and \
not all([cint(t.included_in_print_rate) for t in doc.get("taxes")[:cint(tax.row_id) - 1]]):
			# all rows above the referred tax should be inclusive
_on_previous_row_error("1 - %d" % (tax.row_id,))
elif tax.get("category") == "Valuation":
frappe.throw(_("Valuation type charges can not marked as Inclusive"))
def set_balance_in_account_currency(gl_dict, account_currency=None, conversion_rate=None, company_currency=None):
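	# Fill in the account-currency debit/credit figures from the company-currency
	# amounts, converting with the document's exchange rate when the currencies differ.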
if (not conversion_rate) and (account_currency!=company_currency):
frappe.throw(_("Account: {0} with currency: {1} can not be selected")
.format(gl_dict.account, account_currency))
gl_dict["account_currency"] = company_currency if account_currency==company_currency \
else account_currency
# set debit/credit in account currency if not provided
if flt(gl_dict.debit) and not flt(gl_dict.debit_in_account_currency):
gl_dict.debit_in_account_currency = gl_dict.debit if account_currency==company_currency \
else flt(gl_dict.debit / conversion_rate, 2)
if flt(gl_dict.credit) and not flt(gl_dict.credit_in_account_currency):
gl_dict.credit_in_account_currency = gl_dict.credit if account_currency==company_currency \
else flt(gl_dict.credit / conversion_rate, 2)
def get_advance_journal_entries(party_type, party, party_account, amount_field,
order_doctype, order_list, include_unallocated=True):
dr_or_cr = "credit_in_account_currency" if party_type=="Customer" else "debit_in_account_currency"
conditions = []
if include_unallocated:
conditions.append("ifnull(t2.reference_name, '')=''")
if order_list:
order_condition = ', '.join(['%s'] * len(order_list))
conditions.append(" (t2.reference_type = '{0}' and ifnull(t2.reference_name, '') in ({1}))"\
.format(order_doctype, order_condition))
reference_condition = " and (" + " or ".join(conditions) + ")" if conditions else ""
journal_entries = frappe.db.sql("""
select
"Journal Entry" as reference_type, t1.name as reference_name,
t1.remark as remarks, t2.{0} as amount, t2.name as reference_row,
t2.reference_name as against_order
from
`tabJournal Entry` t1, `tabJournal Entry Account` t2
where
t1.name = t2.parent and t2.account = %s
and t2.party_type = %s and t2.party = %s
and t2.is_advance = 'Yes' and t1.docstatus = 1
and {1} > 0 {2}
order by t1.posting_date""".format(amount_field, dr_or_cr, reference_condition),
[party_account, party_type, party] + order_list, as_dict=1)
return list(journal_entries)
def get_advance_payment_entries(party_type, party, party_account,
order_doctype, order_list=None, include_unallocated=True, against_all_orders=False):
party_account_field = "paid_from" if party_type == "Customer" else "paid_to"
payment_type = "Receive" if party_type == "Customer" else "Pay"
payment_entries_against_order, unallocated_payment_entries = [], []
if order_list or against_all_orders:
if order_list:
reference_condition = " and t2.reference_name in ({0})"\
.format(', '.join(['%s'] * len(order_list)))
else:
reference_condition = ""
order_list = []
payment_entries_against_order = frappe.db.sql("""
select
"Payment Entry" as reference_type, t1.name as reference_name,
t1.remarks, t2.allocated_amount as amount, t2.name as reference_row,
t2.reference_name as against_order, t1.posting_date
from `tabPayment Entry` t1, `tabPayment Entry Reference` t2
where
t1.name = t2.parent and t1.{0} = %s and t1.payment_type = %s
and t1.party_type = %s and t1.party = %s and t1.docstatus = 1
and t2.reference_doctype = %s {1}
""".format(party_account_field, reference_condition),
[party_account, payment_type, party_type, party, order_doctype] + order_list, as_dict=1)
if include_unallocated:
unallocated_payment_entries = frappe.db.sql("""
select "Payment Entry" as reference_type, name as reference_name,
remarks, unallocated_amount as amount
from `tabPayment Entry`
where
{0} = %s and party_type = %s and party = %s and payment_type = %s
and docstatus = 1 and unallocated_amount > 0
""".format(party_account_field), (party_account, party_type, party, payment_type), as_dict=1)
return list(payment_entries_against_order) + list(unallocated_payment_entries) | gpl-3.0 | -5,101,145,368,860,695,000 | 38.035961 | 180 | 0.690383 | false |
cortext/crawtextV2 | page.py | 1 | 4427 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#from __future__ import print_function
import datetime
from os.path import exists
import sys
import requests
import json
import re
#from goose import Goose
from pymongo import errors as mongo_err
#from bs4 import BeautifulSoup as bs
#import beautifulsoup4 as bs
from urlparse import urlparse
from random import choice
from tld import get_tld
from scrapper import *
from utils.url import *
from scrapper.article import Article
class Page(object):
'''Page factory'''
def __init__(self, url, step = 0, output_format="defaut"):
self.url = url
#~ if query is not None:
#~ self.match_query = regexify(query)
self.step = step
self.crawl_date = datetime.datetime.now()
self.status = {"msg":None, "status": None, "code": None, "step": "page creation", "url": self.url}
#~ self.error_type = "Ok"
#~ self.status = "Ok"
#~ self.status_code = 0
self.output_format = output_format
def check(self):
self.status["step"] = "check page"
self.status["status"], self.status["code"], self.status["msg"], self.status["url"] = check_url(self.url)
self.url = self.status["url"]
return self.status["status"]
def request(self):
'''Bool request a webpage: return boolean and update raw_html'''
self.status["step"] = "request page"
try:
requests.adapters.DEFAULT_RETRIES = 2
user_agents = [u'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1', u'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2', u'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0', u'Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00']
headers = {'User-Agent': choice(user_agents),}
proxies = {"https":"77.120.126.35:3128", "https":'88.165.134.24:3128', }
try:
                self.req = requests.get(self.url, headers=headers, allow_redirects=True, proxies=None, timeout=5)
try:
self.raw_html = self.req.text
self.status["status"] = True
self.status["code"] = 200
except Exception, e:
self.status["msg"] = "Request answer was not understood %s" %e
self.status["code"] = 400
self.status["status"] = False
except Exception, e:
self.status["msg"] = "Request answer was not understood %s" %e
self.status["code"] = 400
self.status["status"] = False
except requests.exceptions.MissingSchema:
self.status["msg"] = "Incorrect url - Missing sheme for : %s" %self.url
self.status["code"] = 406
self.status["status"] = False
except Exception as e:
self.status["msg"] = "Another wired exception: %s %s" %(e, e.args)
self.status["code"] = 204
self.status["status"] = False
return self.status["status"]
def control(self):
'''Bool control the result if text/html or if content available'''
self.status["step"] = "control"
#Content-type is not html
try:
self.req.headers['content-type']
if 'text/html' not in self.req.headers['content-type']:
self.status["msg"]="Content type is not TEXT/HTML"
self.status["code"] = 404
self.status["status"] = False
            #Error on resource or on server
elif self.req.status_code in range(400,520):
self.status["code"] = self.req.status_code
self.status["msg"]="Request error on connexion no ressources or not able to reach server"
self.status["status"] = False
#Redirect
#~ elif len(self.req.history) > 0 | self.req.status_code in range(300,320):
#~ self.error_type="Redirection"
#~ self.bad_status()
#~ return False
else:
self.status["status"] = True
#Headers problems
except Exception:
self.status["msg"]="Request headers were not found"
self.status["code"] = 403
self.status["status"] = False
return self.status["status"]
def extract(self, type="article"):
'''Dict extract content and info of webpage return boolean and self.info'''
#self.status["step"] = "extract %s" %type
a = Article(self.url, self.raw_html)
return a.get()
'''
def is_relevant(self, query, content):
if query.match(self,unicode(content)) is False:
self.status = {"url":self.url, "code": -1, "msg": "Not Relevant","status": False, "title": self.title, "content": self.content}
return False
else:
self.status =
return True
'''
| mit | 8,397,858,714,817,989,000 | 33.585938 | 445 | 0.658911 | false |
galuszkak/djangodash | game/migrations/0007_auto__chg_field_game_host.py | 1 | 5665 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Game.host'
db.alter_column(u'game_game', 'host_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['game.MemoUser']))
def backwards(self, orm):
# Changing field 'Game.host'
db.alter_column(u'game_game', 'host_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['game.MemoUser']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'game.configuration': {
'Meta': {'object_name': 'Configuration'},
'charValue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['game.Game']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intValue': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
u'game.game': {
'Meta': {'object_name': 'Game'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosted_games_set'", 'null': 'True', 'to': u"orm['game.MemoUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'players': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['game.MemoUser']", 'symmetrical': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'WA'", 'max_length': '2'})
},
u'game.memouser': {
'Meta': {'object_name': 'MemoUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'game.statistic': {
'Meta': {'object_name': 'Statistic'},
'charValue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['game.Game']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intValue': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['game.MemoUser']", 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '3'})
}
}
complete_apps = ['game'] | gpl-2.0 | -7,510,298,253,660,137,000 | 66.452381 | 187 | 0.55481 | false |
wraiden/spacewalk | backend/satellite_tools/repo_plugins/__init__.py | 1 | 12733 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
import re
import time
from Queue import Queue, Empty
from threading import Thread, Lock
try:
# python 2
import urlparse
except ImportError:
# python3
import urllib.parse as urlparse # pylint: disable=F0401,E0611
from urllib import quote
import pycurl
import rpm
from urlgrabber.grabber import URLGrabberOptions, PyCurlFileObject, URLGrabError
from spacewalk.common import rhn_pkg
from spacewalk.common.checksum import getFileChecksum
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.server import rhnPackageUpload
from spacewalk.satellite_tools.syncLib import log, log2
class ProgressBarLogger:
def __init__(self, msg, total):
self.msg = msg
self.total = total
self.status = 0
self.lock = Lock()
def log(self, *_):
self.lock.acquire()
self.status += 1
self._print_progress_bar(self.status, self.total, prefix=self.msg, bar_length=50)
self.lock.release()
# from here http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# Print iterations progress
@staticmethod
def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=2, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
filled_length = int(round(bar_length * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar_char = '#' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar_char, percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
class TextLogger:
def __init__(self, _, total):
self.total = total
self.status = 0
self.lock = Lock()
def log(self, success, param):
self.lock.acquire()
self.status += 1
if success:
log(0, "%d/%d : %s" % (self.status, self.total, str(param)))
else:
log2(0, 0, "%d/%d : %s (failed)" % (self.status, self.total, str(param)), stream=sys.stderr)
self.lock.release()
# Older versions of urlgrabber don't allow to set proxy parameters separately
# Simplified version from yumRepository class
def get_proxies(proxy, user, password):
if not proxy:
return {}
proxy_string = proxy
if user:
auth = quote(user)
if password:
auth += ':' + quote(password)
proto, rest = re.match(r'(\w+://)(.+)', proxy_string).groups()
proxy_string = "%s%s@%s" % (proto, auth, rest)
proxies = {'http': proxy_string, 'https': proxy_string, 'ftp': proxy_string}
return proxies
class PyCurlFileObjectThread(PyCurlFileObject):
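    # PyCurlFileObject variant that reuses a cached per-thread pycurl handle
    # (reset before each request) instead of creating a new curl object per file.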
def __init__(self, url, filename, opts, curl_cache):
self.curl_cache = curl_cache
PyCurlFileObject.__init__(self, url, filename, opts)
def _do_open(self):
self.curl_obj = self.curl_cache
self.curl_obj.reset()
self._set_opts()
self._do_grab()
return self.fo
class FailedDownloadError(Exception):
pass
class DownloadThread(Thread):
def __init__(self, parent):
Thread.__init__(self)
self.parent = parent
# pylint: disable=E1101
self.curl = pycurl.Curl()
self.mirror = 0
@staticmethod
def __is_file_done(local_path=None, file_obj=None, checksum_type=None, checksum=None):
if checksum_type and checksum:
if local_path and os.path.isfile(local_path):
return getFileChecksum(checksum_type, filename=local_path) == checksum
elif file_obj:
return getFileChecksum(checksum_type, file_obj=file_obj) == checksum
if local_path and os.path.isfile(local_path):
return True
elif file_obj:
return True
return False
def __can_retry(self, retry, mirrors, opts, url, e):
retrycode = getattr(e, 'errno', None)
code = getattr(e, 'code', None)
if retry < (self.parent.retries - 1):
# No codes at all or some specified codes
# 58, 77 - Couple of curl error codes observed in multithreading on RHEL 7 - probably a bug
if (retrycode is None and code is None) or (retrycode in opts.retrycodes or code in [58, 77]):
log2(0, 2, "ERROR: Download failed: %s - %s. Retrying..." % (url, sys.exc_info()[1]),
stream=sys.stderr)
return True
# 14 - HTTP Error
if retry < (mirrors - 1) and retrycode == 14:
log2(0, 2, "ERROR: Download failed: %s - %s. Trying next mirror..." % (url, sys.exc_info()[1]),
stream=sys.stderr)
return True
log2(0, 1, "ERROR: Download failed: %s - %s." % (url, sys.exc_info()[1]),
stream=sys.stderr)
return False
def __next_mirror(self, total):
if self.mirror < (total - 1):
self.mirror += 1
else:
self.mirror = 0
def __fetch_url(self, params):
# Skip existing file if exists and matches checksum
if not self.parent.force:
if self.__is_file_done(local_path=params['target_file'], checksum_type=params['checksum_type'],
checksum=params['checksum']):
return True
opts = URLGrabberOptions(ssl_ca_cert=params['ssl_ca_cert'], ssl_cert=params['ssl_client_cert'],
ssl_key=params['ssl_client_key'], range=params['bytes_range'],
proxy=params['proxy'], username=params['proxy_username'],
password=params['proxy_password'], proxies=params['proxies'])
mirrors = len(params['urls'])
        for retry in range(max(self.parent.retries, mirrors)):
fo = None
url = urlparse.urljoin(params['urls'][self.mirror], params['relative_path'])
try:
try:
fo = PyCurlFileObjectThread(url, params['target_file'], opts, self.curl)
# Check target file
if not self.__is_file_done(file_obj=fo, checksum_type=params['checksum_type'],
checksum=params['checksum']):
raise FailedDownloadError("Target file isn't valid. Checksum should be %s (%s)."
% (params['checksum'], params['checksum_type']))
break
except (FailedDownloadError, URLGrabError):
e = sys.exc_info()[1]
if not self.__can_retry(retry, mirrors, opts, url, e):
return False
self.__next_mirror(mirrors)
finally:
if fo:
fo.close()
# Delete failed download file
elif os.path.isfile(params['target_file']):
os.unlink(params['target_file'])
return True
def run(self):
while not self.parent.queue.empty():
try:
params = self.parent.queue.get(block=False)
except Empty:
break
self.mirror = 0
success = self.__fetch_url(params)
if self.parent.log_obj:
# log_obj must be thread-safe
self.parent.log_obj.log(success, os.path.basename(params['relative_path']))
self.parent.queue.task_done()
class ThreadedDownloader:
def __init__(self, retries=3, log_obj=None, force=False):
self.queue = Queue()
initCFG('server.satellite')
self.threads = CFG.REPOSYNC_DOWNLOAD_THREADS
self.retries = retries
self.log_obj = log_obj
self.force = force
def set_log_obj(self, log_obj):
self.log_obj = log_obj
def set_force(self, force):
self.force = force
@staticmethod
def _validate(ssl_ca_cert, ssl_cert, ssl_key):
for certificate_file in (ssl_ca_cert, ssl_cert, ssl_key):
if certificate_file and not os.path.isfile(certificate_file):
log2(0, 0, "ERROR: Certificate file not found: %s" % certificate_file, stream=sys.stderr)
return False
return True
def add(self, params):
if self._validate(params['ssl_ca_cert'], params['ssl_client_cert'], params['ssl_client_key']):
self.queue.put(params)
def run(self):
size = self.queue.qsize()
if size <= 0:
return
log(1, "Downloading %s files." % str(size))
started_threads = []
for _ in range(self.threads):
thread = DownloadThread(self)
thread.setDaemon(True)
thread.start()
started_threads.append(thread)
# wait to finish
while any(t.isAlive() for t in started_threads):
time.sleep(1)
class ContentPackage:
def __init__(self):
# map of checksums
self.checksum_type = None
self.checksum = None
# unique ID that can be used by plugin
self.unique_id = None
self.name = None
self.version = None
self.release = None
self.epoch = None
self.arch = None
self.path = None
self.a_pkg = None
def __cmp__(self, other):
ret = cmp(self.name, other.name)
if ret == 0:
rel_self = str(self.release).split('.')[0]
rel_other = str(other.release).split('.')[0]
# pylint: disable=E1101
ret = rpm.labelCompare((str(self.epoch), str(self.version), rel_self),
(str(other.epoch), str(other.version), rel_other))
if ret == 0:
ret = cmp(self.arch, other.arch)
return ret
def getNRA(self):
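        # Name-Release-Arch key: drop everything in the release up to and including
        # the first dot (e.g. '2.el7' -> 'el7') before concatenating name and arch.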
rel = re.match(".*?\\.(.*)",self.release)
rel = rel.group(1)
nra = str(self.name) + str(rel) + str(self.arch)
return nra
def setNVREA(self, name, version, release, epoch, arch):
self.name = name
self.version = version
self.release = release
self.arch = arch
self.epoch = epoch
def getNVREA(self):
if self.epoch:
return self.name + '-' + self.version + '-' + self.release + '-' + self.epoch + '.' + self.arch
else:
return self.name + '-' + self.version + '-' + self.release + '.' + self.arch
def getNEVRA(self):
if self.epoch is None:
self.epoch = '0'
return self.name + '-' + self.epoch + ':' + self.version + '-' + self.release + '.' + self.arch
def load_checksum_from_header(self):
if self.path is None:
raise rhnFault(50, "Unable to load package", explain=0)
self.a_pkg = rhn_pkg.package_from_filename(self.path)
self.a_pkg.read_header()
self.a_pkg.payload_checksum()
self.a_pkg.input_stream.close()
def upload_package(self, channel, metadata_only=False):
if not metadata_only:
rel_package_path = rhnPackageUpload.relative_path_from_header(
self.a_pkg.header, channel['org_id'], self.a_pkg.checksum_type, self.a_pkg.checksum)
else:
rel_package_path = None
_unused = rhnPackageUpload.push_package(self.a_pkg,
force=False,
relative_path=rel_package_path,
org_id=channel['org_id'])
| gpl-2.0 | -7,994,968,211,476,632,000 | 36.014535 | 107 | 0.56758 | false |
umitproject/tease-o-matic | django_mongodb_engine/widgets.py | 1 | 2621 | from django.conf import settings
from django.forms import widgets
from django.db import models
from django.utils.safestring import mark_safe
import warnings
warnings.warn("django_mongodb_engine.widgets is deprecated and will be removed "
"in version 0.5", DeprecationWarning)
class DictWidget(widgets.Widget):
def value_from_datadict(self, data, files, name):
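        # render() posts a hidden "<name>_rows" counter plus "<name>_<i>_key" /
        # "<name>_<i>_value" pairs and an extra "<name>_new_*" row; rebuild the
        # dict from those form fields here.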
if data.has_key("%s_rows" % name):
            returnlist = {}
            rows = int(data["%s_rows" % name])
while rows > 0:
rows -= 1
rowname = "%s_%d" % (name, rows )
if data.has_key("%s_key" % rowname ) :
k = data["%s_key" % rowname]
if k != "":
v = None
if data.has_key("%s_value" % rowname ) :
v = data["%s_value"%rowname]
returnlist[k]=v
rowname = "%s_new" % name
if data.has_key("%s_key" % rowname ) :
k = data["%s_key" % rowname]
if k != "":
v = None
if data.has_key("%s_value" % rowname ) :
v = data["%s_value"%rowname]
returnlist[k]=v
return returnlist
else:
return None
def render(self, name, value, attrs=None):
htmlval="<table><tr><td>#</td><td>Key</td><td>Value</td></tr>"
linenum=0
idname = attrs['id']
if (value is not None) and (type(value).__name__=='dict') :
for key, val in value.items():
idname_row = "%s_%d" % ( idname, linenum )
htmlval += '<tr><td><label for="%s_key">%d</label></td><td><input type="txt" id="%s_key" name="%s_%d_key" value="%s" /></td>' % (
idname_row, linenum ,idname_row, name,linenum, key )
htmlval += '<td><input type="txt" id="%s_value" name="%s_%d_value" value="%s" /></td></tr>' % (
idname_row, name,linenum, val)
linenum += 1
idname_row = "%s_new" % ( idname )
htmlval += '<tr><td><label for="%s_key">new</label></td><td><input type="txt" id="%s_key" name="%s_new_key" value="" /></td>' % (
idname_row, idname_row, name)
htmlval += '<td><input type="txt" id="%s_value" name="%s_new_value" value="" /></td></tr>' % (
idname_row, name )
htmlval += "</table>"
htmlval += "<input type='hidden' name='%s_rows' value='%d'>" % ( name, linenum )
return mark_safe(htmlval)
| bsd-3-clause | 4,250,996,487,729,507,000 | 40.603175 | 145 | 0.47272 | false |
ib-lundgren/django-oauthlib | django_oauthlib/views.py | 1 | 4525 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
# TODO: don't import errors like this
from oauthlib.oauth2.draft25 import errors
from oauthlib.oauth2 import Server
from .validator import DjangoValidator
from .utils import extract_params, log
def get_credentials(request):
return {}
def get_authorization(request):
    return request.POST.getlist('scopes'), {'user': request.user}
def get_actual_authorization_view(request):
# TODO: use template?
def basic_view(request, client_id=None, scopes=None, **kwargs):
response = HttpResponse()
response.write('<h1> Authorize access to %s </h1>' % client_id)
response.write('<form method="POST" action="/post_authorization">')
for scope in scopes or []:
response.write('<input type="checkbox" name="scopes" value="%s"/> %s' % (scope, scope))
response.write('<input type="submit" value="Authorize"/>')
return response
return basic_view
class AuthorizationView(View):
def __init__(self):
validator = DjangoValidator()
# TODO: this should probably be tunable through settings
self._authorization_endpoint = Server(validator)
self._error_uri = reverse('oauth2_error')
def get(self, request, *args, **kwargs):
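        # Validate the authorization request, stash the validated credentials in
        # the session for the POST step, then delegate rendering of the consent
        # form to the actual authorization view.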
uri, http_method, body, headers = extract_params(request)
redirect_uri = request.GET.get('redirect_uri', None)
log.debug('Found redirect uri %s.', redirect_uri)
try:
scopes, credentials = self._authorization_endpoint.validate_authorization_request(
uri, http_method, body, headers)
log.debug('Saving credentials to session, %r.', credentials)
request.session['oauth2_credentials'] = credentials
kwargs['scopes'] = scopes
kwargs.update(credentials)
actual_view = get_actual_authorization_view(request)
log.debug('Invoking actual view method, %r.', actual_view)
return actual_view(request, *args, **kwargs)
except errors.FatalClientError as e:
log.debug('Fatal client error, redirecting to error page.')
return HttpResponseRedirect(e.in_uri(self._error_uri))
except errors.OAuth2Error as e:
log.debug('Client error, redirecting back to client.')
# TODO: remove after federico PR
e.redirect_uri = redirect_uri or 'https://localhost'
return HttpResponseRedirect(e.in_uri(e.redirect_uri))
@csrf_exempt
def post(self, request, *args, **kwargs):
uri, http_method, body, headers = extract_params(request)
scopes, credentials = get_authorization(request)
log.debug('Fetched credentials view, %r.', credentials)
credentials.update(request.session.get('oauth2_credentials', {}))
log.debug('Fetched credentials from session, %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect uri %s.', redirect_uri)
try:
url, headers, body, status = self._authorization_endpoint.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful, redirecting to client.')
return HttpResponseRedirect(url)
except errors.FatalClientError as e:
log.debug('Fatal client error, redirecting to error page.')
return HttpResponseRedirect(e.in_uri(self._error_uri))
except errors.OAuth2Error as e:
log.debug('Client error, redirecting back to client.')
return HttpResponseRedirect(e.in_uri(redirect_uri))
class TokenView(View):
def __init__(self, token_endpoint):
self._token_endpoint = token_endpoint
def post(self, request, *args, **kwargs):
uri, http_method, body, headers = extract_params(request)
credentials = get_credentials(request)
log.debug('Fetched credentials view, %r.', credentials)
url, headers, body, status = self._token_endpoint.create_token_response(
uri, http_method, body, headers, credentials)
response = HttpResponse(content=body, status=status)
for k, v in headers.items():
response[k] = v
return response
class ErrorView(View):
pass
| bsd-3-clause | -8,697,564,746,311,574,000 | 40.136364 | 100 | 0.655249 | false |
sorpaas/reread | reader/views/queries/read_records.py | 1 | 1465 | from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from reader.documents import *
import datetime
import json
import urllib
@login_required
def like_article(request, record_id):
record = ReadRecord.objects.get(id=record_id)
record.is_liked = True
record.save()
return HttpResponse(json.dumps({"status": "ok"}),
content_type="application/json")
@login_required
def unlike_article(request, record_id):
record = ReadRecord.objects.get(id=record_id)
record.is_liked = False
record.save()
return HttpResponse(json.dumps({"status": "ok"}),
content_type="application/json")
@login_required
def create(request, article_id):
reader = Reader.reader_for(request.user)
article = Article.objects.get(id=article_id)
reader.reading_list = [x for x in reader.reading_list if str(x['article_id']) != str(article_id)]
reader.save()
try:
record = ReadRecord.objects.get(reader=reader, article=article)
except ReadRecord.DoesNotExist:
record = ReadRecord(reader=reader, article=article)
record.save()
return HttpResponse(json.dumps({"status": "ok",
"record_id": str(record.id),
"is_liked": record.is_liked}),
content_type="application/json")
| mit | 6,258,733,803,302,709,000 | 36.564103 | 101 | 0.651877 | false |
wujuguang/scrapyd | scrapyd/webservice.py | 3 | 6619 | from copy import copy
import traceback
import uuid
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from twisted.python import log
from .utils import get_spider_list, JsonResource, UtilsCache, native_stringify_dict
class WsResource(JsonResource):
def __init__(self, root):
JsonResource.__init__(self)
self.root = root
def render(self, txrequest):
try:
return JsonResource.render(self, txrequest).encode('utf-8')
except Exception as e:
if self.root.debug:
return traceback.format_exc().encode('utf-8')
log.err()
r = {"node_name": self.root.nodename, "status": "error", "message": str(e)}
return self.render_object(r, txrequest).encode('utf-8')
class DaemonStatus(WsResource):
def render_GET(self, txrequest):
pending = sum(q.count() for q in self.root.poller.queues.values())
running = len(self.root.launcher.processes)
finished = len(self.root.launcher.finished)
return {"node_name": self.root.nodename, "status":"ok", "pending": pending, "running": running, "finished": finished}
class Schedule(WsResource):
def render_POST(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
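        # Each "setting" argument arrives as "KEY=VALUE"; collect them into a dict
        # of per-job Scrapy settings before scheduling.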
settings = args.pop('setting', [])
settings = dict(x.split('=', 1) for x in settings)
args = dict((k, v[0]) for k, v in args.items())
project = args.pop('project')
spider = args.pop('spider')
version = args.get('_version', '')
priority = float(args.pop('priority', 0))
spiders = get_spider_list(project, version=version)
if not spider in spiders:
return {"status": "error", "message": "spider '%s' not found" % spider}
args['settings'] = settings
jobid = args.pop('jobid', uuid.uuid1().hex)
args['_job'] = jobid
self.root.scheduler.schedule(project, spider, priority=priority, **args)
return {"node_name": self.root.nodename, "status": "ok", "jobid": jobid}
class Cancel(WsResource):
def render_POST(self, txrequest):
args = dict((k, v[0])
for k, v in native_stringify_dict(copy(txrequest.args),
keys_only=False).items())
project = args['project']
jobid = args['job']
signal = args.get('signal', 'TERM')
prevstate = None
queue = self.root.poller.queues[project]
c = queue.remove(lambda x: x["_job"] == jobid)
if c:
prevstate = "pending"
spiders = self.root.launcher.processes.values()
for s in spiders:
if s.project == project and s.job == jobid:
s.transport.signalProcess(signal)
prevstate = "running"
return {"node_name": self.root.nodename, "status": "ok", "prevstate": prevstate}
class AddVersion(WsResource):
def render_POST(self, txrequest):
eggf = BytesIO(txrequest.args.pop(b'egg')[0])
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
version = args['version'][0]
self.root.eggstorage.put(eggf, project, version)
spiders = get_spider_list(project, version=version)
self.root.update_projects()
UtilsCache.invalid_cache(project)
return {"node_name": self.root.nodename, "status": "ok", "project": project, "version": version, \
"spiders": len(spiders)}
class ListProjects(WsResource):
def render_GET(self, txrequest):
projects = list(self.root.scheduler.list_projects())
return {"node_name": self.root.nodename, "status": "ok", "projects": projects}
class ListVersions(WsResource):
def render_GET(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
versions = self.root.eggstorage.list(project)
return {"node_name": self.root.nodename, "status": "ok", "versions": versions}
class ListSpiders(WsResource):
def render_GET(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
version = args.get('_version', [''])[0]
spiders = get_spider_list(project, runner=self.root.runner, version=version)
return {"node_name": self.root.nodename, "status": "ok", "spiders": spiders}
class ListJobs(WsResource):
def render_GET(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args.get('project', [None])[0]
spiders = self.root.launcher.processes.values()
queues = self.root.poller.queues
pending = [
{"project": qname, "spider": x["name"], "id": x["_job"]}
for qname in (queues if project is None else [project])
for x in queues[qname].list()
]
running = [
{
"project": s.project,
"spider": s.spider,
"id": s.job, "pid": s.pid,
"start_time": str(s.start_time),
} for s in spiders if project is None or s.project == project
]
finished = [
{
"project": s.project,
"spider": s.spider, "id": s.job,
"start_time": str(s.start_time),
"end_time": str(s.end_time)
} for s in self.root.launcher.finished
if project is None or s.project == project
]
return {"node_name": self.root.nodename, "status": "ok",
"pending": pending, "running": running, "finished": finished}
class DeleteProject(WsResource):
def render_POST(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
self._delete_version(project)
UtilsCache.invalid_cache(project)
return {"node_name": self.root.nodename, "status": "ok"}
def _delete_version(self, project, version=None):
self.root.eggstorage.delete(project, version)
self.root.update_projects()
class DeleteVersion(DeleteProject):
def render_POST(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
version = args['version'][0]
self._delete_version(project, version)
UtilsCache.invalid_cache(project)
return {"node_name": self.root.nodename, "status": "ok"}
| bsd-3-clause | -5,762,503,081,443,580,000 | 37.935294 | 125 | 0.599033 | false |
ohrstrom/django-nunjucks | nunjucks/management/commands/nunjucks_tools.py | 1 | 4480 | import os
import sys
from collections import OrderedDict
from optparse import make_option
from django.core.files.storage import FileSystemStorage
from django.core.management.base import CommandError, NoArgsCommand
from django.contrib.staticfiles import finders, storage
from django.template.loader import render_to_string
from nunjucks.compiler import NunjucksCompiler
class Command(NoArgsCommand):
"""
    Command that collects nunjucks templates from the static file finders and
    compiles them into a single precompiled templates.js bundle.
"""
option_list = NoArgsCommand.option_list + (
make_option('--compile',
action='store_false', dest='do_compile', default=False,
help="Compile nunjucks templates"),
make_option('-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except modify the filesystem."),
)
help = "Collect static files in a single location."
requires_model_validation = False
def __init__(self, *args, **kwargs):
super(NoArgsCommand, self).__init__(*args, **kwargs)
self.storage = storage.staticfiles_storage
try:
self.storage.path('')
except NotImplementedError:
self.local = False
else:
self.local = True
self.compiler = NunjucksCompiler()
def set_options(self, **options):
self.do_compile = options['do_compile']
self.dry_run = options['dry_run']
def collect(self):
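        # Walk every static-file finder, compile each template found under an
        # 'nj/' path with NunjucksCompiler, and render the results into one
        # precompiled templates.js bundle written to `target` below.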
target = 'apps/nunjucks/static/nunjucks/js/templates.js'
templates = []
for finder in finders.get_finders():
for path, storage in finder.list(['*zinnia*']):
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
# TOTO: find a correct way to get nj-paths
if '/nj/' in path:
templates.append( {
'path': path,
'inner': self.compiler.compile(storage.path(path))
}
)
tpl = render_to_string('nunjucks/compile/templates.js', {'templates': templates})
open(target, "w").write(tpl)
return
def handle_noargs(self, **options):
self.set_options(**options)
# Warn before doing anything more.
if (isinstance(self.storage, FileSystemStorage) and
self.storage.location):
destination_path = self.storage.location
destination_display = ':\n\n %s' % destination_path
else:
destination_path = None
destination_display = '.'
collected = self.collect()
def compile_file(self, path, prefixed_path, source_storage):
# dummy, to test compiler
source_path = source_storage.path(path)
if 'on_air_item.html' in path:
print
print 'path: %s' % path
print 'prefixed_path: %s' % prefixed_path
print 'source_path: %s' % source_path
print 'source_storage: %s' % source_storage
print self.compiler.compile(source_path)
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
if self.local:
full_path = self.storage.path(prefixed_path)
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
if not prefixed_path in self.copied_files:
self.copied_files.append(prefixed_path) | bsd-3-clause | 7,821,409,506,878,498,000 | 30.556338 | 89 | 0.577902 | false |
CaptainHayashi/lass | urysite/url_regexes.py | 1 | 1298 | """
Common regular expression stubs for URLconfs.
These are collected in a common module to ensure consistency across
the LASS platform.
"""
# Helper functions #
def relative(partials):
"""
Given a sequence of partial regexes, constructs a full regex that
treats the partial regexes as stages in a directory hierarchy
relative to the current root.
"""
return r'^{0}/$'.format('/'.join(partials))
def relatives(partial_sequences):
"""
A wrapper around `relative` that processes a sequence of partial
regex sequences.
"""
return (relative(x) for x in partial_sequences)
# Dates #
## Partial regexes
# NB: The year regex is limited to years 1-9999.
# This is intentional and mirrors the MAX_YEAR Python used at time of
# writing (thus preventing year overflows).
YEAR_PARTIAL = r'(?P<year>[1-9]\d?\d?\d?)'
WEEK_PARTIAL = r'[wW](eek)?(?P<week>([0-4]?\d|5[0-3]))'
WEEKDAY_PARTIAL = r'[dD]?(ay)?(?P<weekday>[1-7])'
MONTH_PARTIAL = r'(?P<month>(0?\d|1[12]))'
DAY_PARTIAL = r'(?P<day>([0-2]?\d|3[01]))'
## Full relative regexes
WEEK_REGEX, WEEKDAY_REGEX, DAY_REGEX = (
relative(x) for x in (
(YEAR_PARTIAL, WEEK_PARTIAL),
(YEAR_PARTIAL, WEEK_PARTIAL, WEEKDAY_PARTIAL),
(YEAR_PARTIAL, MONTH_PARTIAL, DAY_PARTIAL),
)
)
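## Illustrative expansion (for reference only; derived from the partials above)
#
# relative((YEAR_PARTIAL, MONTH_PARTIAL, DAY_PARTIAL)) yields
#   r'^(?P<year>[1-9]\d?\d?\d?)/(?P<month>(0?\d|1[12]))/(?P<day>([0-2]?\d|3[01]))/$'
# which matches a date path such as '2013/05/07/'.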
| gpl-2.0 | 6,314,977,351,025,411,000 | 24.96 | 69 | 0.657935 | false |
tbenthompson/LMS_public | lms_code/plots/plot_hazard.py | 1 | 3557 | import matplotlib.pyplot as plt
import numpy as np
from lms_code.analysis.run_bem import get_slip_magnitude
import lms_code.lib.rep2 as rep2
import lms_code.plots.plot_all as lms_plot
def main():
lms_plot.setup()
fig = plt.figure()
which_model = 'all_details'
bem_soln = rep2.load('bem_' + which_model)
shortening = rep2.load('shortening_estimate_' + which_model)
est = shortening['lsqr_shortening']
est_low = est - shortening['lsqr_shortening_error']
est_high = est + shortening['lsqr_shortening_error']
total_length = 0.0
slip = 0.0
slip_low = 0.0
slip_high = 0.0
joint = [4.20012e5 + 1.6, -2.006e4 - 5]
for e in bem_soln['fault_mesh']:
if e.vertex1.loc[0] < joint[0] - 10:
continue
total_length += e.length
slip_mag = np.linalg.norm(get_slip_magnitude(e))
slip += e.length * est * slip_mag
slip_low += e.length * est_low * slip_mag
slip_high += e.length * est_high * slip_mag
s = (slip / total_length) / 1000
s_low = (slip_low / total_length) / 1000
s_high = (slip_high / total_length) / 1000
slip_err = s_high - s
# s = 6.1 / 1000
# s_low = 4.6 / 1000
# s_high = 7.6 / 1000
T = np.linspace(0, 3000, 100)
d = T * s
T_high = d / s_low
T_low = d / s_high
wenchuan_d = 4.0
wenchuan_T_low = wenchuan_d / s_low
wenchuan_T = wenchuan_d / s
wenchuan_T_high = wenchuan_d / s_high
print("Wenchuan recurrence: " + str(wenchuan_T) + " (low: " + str(wenchuan_T_low) + ", high: " + str(wenchuan_T_high) + ")")
a_wells = 6.93
b_wells = 0.82
mag7_ad = np.exp((7.0 - a_wells) / b_wells)
mag7_T = mag7_ad / s
paleo_T = 2300
paleo_ad = paleo_T * s
paleo_mag = (np.log(paleo_ad) * b_wells) + a_wells
plt.plot(d, T, 'k-')
plt.fill_between(d, T_low, T_high, facecolor = '#AAAAAA')
plt.plot([0, paleo_ad + 100], [paleo_T, paleo_T], 'k--')
plt.plot([wenchuan_d, mag7_ad, paleo_ad], [wenchuan_T, mag7_T, paleo_T],
linestyle = 'None',
marker = 'o',
markeredgewidth = 4.0,
markeredgecolor = (0, 0, 0, 1.0),
markerfacecolor = (1, 1, 1, 1.0),
markersize = 15)
# Plot Wenchuan
text = 'Wenchuan-like $\\textrm{M}_{\\textrm{w}}$ 7.9 (' + '%.0f'%wenchuan_d + ' m, ' +\
'%.0f'%wenchuan_T + ' years)'
plt.annotate(text, (wenchuan_d, wenchuan_T),
xytext = (wenchuan_d + 0.5, wenchuan_T - 50))
# Plot the Mw 7 pt
text = 'Typical $\\textrm{M}_{\\textrm{w}}$ 7.0 (' + '%.0f'%mag7_ad + ' m, ' +\
'%.0f'%mag7_T + ' years)'
plt.annotate(text, (mag7_ad, mag7_T),
xytext = (mag7_ad + 0.9, mag7_T - 30))
# Plot the paleoseismic pt
text = 'Low paleoseismic estimate'
plt.text(1.7, 2350, text)
text = '($Ran$ $et$ $al.$ 2010)'
plt.text(1.7, 2200, text)
text = '$\\textrm{M}_{\\textrm{w}}$ ' + '%0.f'%paleo_mag + ', ' + '%0.f'%paleo_ad + ' m'
plt.annotate(text, (paleo_ad, paleo_T),
xytext = (paleo_ad - 3.2, paleo_T + 30))
plt.text(2.0, 40, '($Wells$ $and$ $Coppersmith$ 1994)')
plt.text(0.5, 1800, 'average slip rate = ' + '%.1f'%(s * 1000) + ' $\pm$ %.1f'%(slip_err * 1000) + ' mm/yr')
plt.ylabel('$T$ (years)')
plt.xlabel('$d$ (meters)')
plt.ylim([0, 2500])
plt.xlim([0, 2500 * s])
width = 7.0
fig.set_size_inches([width, (6.0 / 8.0) * width])
plt.savefig('hazard_' + which_model)
if __name__ == '__main__':
main()
| mit | 8,852,281,276,699,813,000 | 33.201923 | 128 | 0.536688 | false |
jbest/digitization_tools | sort/sort.py | 1 | 5387 | import argparse
#import glob
#import os
import shutil
from pathlib import Path
DEFAULT_HERBARIUM_PREFIX = 'BRIT'
DEFAULT_FOLDER_INCREMENT = 1000
DEFAULT_NUMBER_PAD = 7
files_analyzed = 0
files_sorted = 0
verbose = False
# set up argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--directory", required=True, \
help="Path to the directory that contains the images to be sorted.")
ap.add_argument("-o", "--output_directory", required=True, default=None,\
help="Path to an existing directory where sorted files and directories will be written.")
ap.add_argument("-p", "--pattern", required=True, \
help="Pattern of filenames to be sorted - eg '*.jpg'")
ap.add_argument("-r", "--recursive", action="store_true", \
help="Recurse sub-directories")
ap.add_argument("-c", "--catalog_prefix", default=DEFAULT_HERBARIUM_PREFIX, \
help="Prefix string for catalog numbers. Default is BRIT.")
ap.add_argument("-i", "--increment", default=DEFAULT_FOLDER_INCREMENT, \
help="Increment for folder numbers.")
ap.add_argument("-l", "--length", default=DEFAULT_NUMBER_PAD, \
help="Length for folder numbers, pre-padded with 0.")
ap.add_argument("-v", "--verbose", action="store_true", \
help="Detailed output.")
ap.add_argument("-n", "--dry_run", action="store_true", \
help="No files moved, no directories created.")
args = vars(ap.parse_args())
HERBARIUM_PREFIX = args["catalog_prefix"]
FOLDER_INCREMENT = int(args["increment"])
PAD = int(args["length"])
recurse_subdirectories = args["recursive"]
dry_run = args["dry_run"]
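# Illustration of the intended mapping (hypothetical filename, assuming the
# defaults HERBARIUM_PREFIX='BRIT', FOLDER_INCREMENT=1000, PAD=7):
#   BRIT0123456.jpg -> accession 123456 -> folder BRIT0123000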
def sort_file(source=None, destination=None):
global files_sorted
if destination.exists():
if dry_run:
print('DRY-RUN: Filename exists, cannot move:', destination)
if verbose:
print('Filename exists, cannot move:', destination)
return False
else:
if dry_run:
print('DRY-RUN: Moved:', destination)
else:
shutil.move(source, destination)
if verbose:
print('Moved:', destination)
files_sorted += 1
return True
#iterate files matching pattern in directory passed from args
#source_directory_path = os.path.realpath(args["directory"])
source_directory_path = Path(args["directory"])
pattern = args["pattern"]
output_directory = args["output_directory"]
output_directory_path = Path(output_directory)
if output_directory_path:
# test to ensure output_directory exists
#if os.path.isdir(output_directory):
if output_directory_path.is_dir():
print('output_directory_path:', output_directory_path)
else:
print(f'ERROR: directory {output_directory_path} does not exist.')
print('Terminating script.')
quit()
if args['verbose']:
verbose = True
print("Verbose report...")
if dry_run:
print('DRY-RUN: starting dry run:')
print('Scanning directory:', source_directory_path, 'for files matching', pattern)
if recurse_subdirectories:
path_matches = source_directory_path.rglob(pattern)
else:
path_matches = source_directory_path.glob(pattern)
#for matching_path in source_directory_path.rglob('*.jpg'):
for matching_path in path_matches:
files_analyzed += 1
#basename = os.path.basename(source_path)
basename = matching_path.name
if basename.startswith(HERBARIUM_PREFIX):
file_name = matching_path.stem
file_extension = matching_path.suffix
#print('file_name:', file_name)
#print('file_extension:', file_extension)
accession_id = file_name[len(HERBARIUM_PREFIX):]
try:
accession_number = int(accession_id)
folder_number = int(accession_number//FOLDER_INCREMENT*FOLDER_INCREMENT)
padded_folder_number = str(folder_number).zfill(PAD)
# zfill may be deprecated in future? Look into string formatting with fill
# https://stackoverflow.com/a/339013
#destination_folder_name = HERBARIUM_PREFIX + str(int(accession_number//FOLDER_INCREMENT*FOLDER_INCREMENT))
destination_folder_name = HERBARIUM_PREFIX + padded_folder_number
if output_directory:
output_directory_path = Path(output_directory)
destination_directory_path = output_directory_path.joinpath(destination_folder_name)
else:
# no output_directory specified, using source directory
destination_directory_path = source_directory_path.joinpath(destination_folder_name)
destination_file_path = destination_directory_path.joinpath(basename)
# Check if destination directory exists
if destination_directory_path.is_dir():
sort_file(source=matching_path, destination=destination_file_path)
else:
if verbose:
                    print(f'Creating folder: {destination_directory_path}')
destination_directory_path.mkdir()
sort_file(source=matching_path, destination=destination_file_path)
except ValueError:
print('Cannot parse', file_name)
else:
if verbose:
print(f'Ignoring {basename} - does not start with {HERBARIUM_PREFIX}.')
print('Sort complete.')
print('Encountered files:', files_analyzed)
print('Sorted files:', files_sorted)
if dry_run:
print('DRY-RUN: ending dry run.')
| mit | -1,070,387,984,230,873,100 | 39.810606 | 119 | 0.664377 | false |
aurelieladier/openturns | python/test/t_NumericalSample_csv.py | 1 | 3101 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
from os import getenv
import os
TESTPREAMBLE()
try:
Log.Show(Log.INFO)
# 1st sample
f = open('sample.csv', 'w')
f.write(
"X1;X2;X3;X4\n-1.2;2.3;3.4;-4.5\n5.6;-6.7;7.8;8.9\n-0.1;3.2;5.1;7.5\n")
f.close()
aSample = NumericalSample.ImportFromCSVFile("sample.csv", ';')
aSample.setName("a good sample")
print("aSample=", repr(aSample))
aSample = NumericalSample.ImportFromTextFile("sample.csv", ";")
aSample.setName("a good sample")
print("aSample=", repr(aSample))
# 2nd sample
f = open('sample.csv', 'w')
f.write(
"X1,X2,X3,X4\n-1.2,2.3,3.4,-4.5\n5.6,-6.7,7.8,8.9\n-0.1,3.2,5.1,7.5\n")
f.close()
aSample = NumericalSample.ImportFromCSVFile("sample.csv", ",")
aSample.setName("a good coma separated sample")
print("aSample=", repr(aSample))
aSample = NumericalSample.ImportFromTextFile("sample.csv", ",")
aSample.setName("a good coma separated sample")
print("aSample=", repr(aSample))
# export
aSample.exportToCSVFile('sample.csv')
with open('sample.csv') as f:
print((f.read()))
# import with quotes in description
aSample = NumericalSample.ImportFromTextFile('sample.csv', ';')
print('import with quotes:', repr(aSample))
# 3rd sample
f = open('sample.csv', 'w')
f.write(
"X1 X2 X3 X4\n-1.2 2.3 3.4 -4.5\n5.6 -6.7 7.8 8.9\n-0.1 3.2 5.1 7.5\n")
f.close()
aSample = NumericalSample.ImportFromTextFile("sample.csv")
aSample.setName("a good sample with spaces")
print("aSample=", repr(aSample))
# 4th sample
f = open('sample.csv', 'w')
f.write(
"-1.2;2.3;3.4;-4.5\n5.6;-xxx;7.8;8.9\n-0.1;3.2;5..1;7.5\n0.9;9.8;8.4;5.4\n")
f.close()
aSample = NumericalSample.ImportFromCSVFile("sample.csv", ';')
aSample.setName("a sample with bad entries")
print("aSample with bad entries (see log)=", repr(aSample))
aSample = NumericalSample.ImportFromTextFile("sample.csv", ";")
aSample.setName("a sample with bad entries")
print("aSample with bad entries (see log)=", repr(aSample))
# 5th sample
f = open('sample.csv', 'w')
f.write(
"-1.2;2.3;3.4;-4.5\n5.6;-6.7;7.8\n-0.1;3.2;;7.5\n6.5;9.0;7.3;-3.7\n")
f.close()
aSample = NumericalSample.ImportFromCSVFile("sample.csv", ';')
aSample.setName("a sample with missing entries")
print("aSample with missing entries (see log)=", repr(aSample))
aSample = NumericalSample.ImportFromTextFile("sample.csv", ";")
aSample.setName("a sample with missing entries")
print("aSample with missing entries (see log)=", repr(aSample))
f = open('sample.csv', 'w')
f.write(
'"X1";"X2!()#{}%&<=>^$+-*./:\\|`?";"X3[unit]"\n5.6;-6.7;7.8\n-0.1;3.2;7.5 \n')
f.close()
aSample = NumericalSample.ImportFromCSVFile('sample.csv', ';')
print('aSample with special chars=', repr(aSample))
os.remove('sample.csv')
except:
import sys
print("t_NumericalSample_csv.py", sys.exc_info()[0], sys.exc_info()[1])
| lgpl-3.0 | 1,588,056,871,728,396,000 | 31.989362 | 86 | 0.613995 | false |
tsileo/incremental-backups-tools | incremental_backups_tools/__init__.py | 1 | 11140 | # -*- coding: utf-8 -*-
import os
import tarfile
import logging
import tempfile
import shutil
from datetime import datetime
import json
import itertools
import librsync
from dirtools import Dir, DirState, compute_diff
import sigvault
logging.basicConfig(level=logging.INFO)
log = logging
CACHE_PATH = os.path.expanduser('~/.cache/bakthat')
if not os.path.exists(CACHE_PATH):
os.makedirs(CACHE_PATH)
class FileFinder(object):
base_paths = [CACHE_PATH, tempfile.gettempdir()]
@classmethod
def make_key(cls, key_type, key, dt):
ext = 'tgz'
if key_type == 'state':
ext = 'json'
return '{0}.{1}.{2}.{3}'.format(key,
key_type,
dt.isoformat(),
ext)
@classmethod
def check(cls, path):
for bp in cls.base_paths:
abs_path = os.path.join(bp, path)
if os.path.exists(abs_path):
return abs_path
@classmethod
def check_key(cls, key_type, key, dt):
k = cls.make_key(key_type, key, dt)
return cls.check(k)
def full_backup(path, cache_path=None):
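    """ Make a full backup of path: a compressed archive of the directory,
    a JSON dump of its state and a new SigVault of file signatures.
    Returns a summary dict (backup_key, backup_date, files, total). """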
if cache_path is None:
cache_path = tempfile.gettempdir()
backup_date = datetime.utcnow()
backup_dir = Dir(path)
backup_key = backup_dir.path.strip('/').split('/')[-1]
backup_dir_state = DirState(backup_dir)
state_file = backup_dir_state.to_json(cache_path, dt=backup_date, fmt='{0}.state.{1}.json')
created_file = FileFinder.make_key('full',
backup_key,
backup_date)
created_file = os.path.join(cache_path, created_file)
backup_dir.compress_to(created_file)
# Create a new SigVault
sigvault_file = FileFinder.make_key('sigvault',
backup_key,
backup_date)
sigvault_file = os.path.join(CACHE_PATH, sigvault_file)
sv = sigvault.open_vault(sigvault_file, 'w', base_path=backup_dir.path)
for f in backup_dir.iterfiles():
sv.add(f)
sv.close()
files = [state_file, created_file, sigvault_file]
files = [{'path': f, 'size': os.path.getsize(f)} for f in files]
total = sum([f['size'] for f in files])
return {'backup_key': backup_key, 'backup_date': backup_date, 'files': files, 'total': total}
def incremental_backup(path, cache_path=None):
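    """ Make an incremental backup of path against the last stored state:
    new files go into a 'created' archive, updated files are stored as
    librsync deltas against the previous SigVault signatures, and new
    signatures are recorded. Returns a summary dict. """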
if cache_path is None:
cache_path = tempfile.gettempdir()
files = []
backup_date = datetime.utcnow()
backup_dir = Dir(path)
backup_key = backup_dir.path.strip('/').split('/')[-1]
# TODO check if it's really the last state on the remote storage
last_state = Dir(cache_path).get('{0}.state.*'.format(backup_key), sort_reverse=True, abspath=True)
last_state = DirState.from_json(last_state)
current_state = DirState(backup_dir)
last_sv = sigvault.SigVaultReader(CACHE_PATH, backup_key)
diff = current_state - last_state
state_file = current_state.to_json(cache_path, dt=backup_date, fmt='{0}.state.{1}.json')
files.append(state_file)
created_file = FileFinder.make_key('created',
backup_key,
backup_date)
created_file = os.path.join(cache_path, created_file)
# Store files from diff['created'] into a new archive
created_file = process_created(created_file,
diff['created'],
backup_dir.path)
if created_file:
files.append(created_file)
updated_file = FileFinder.make_key('updated',
backup_key,
backup_date)
updated_file = os.path.join(cache_path, updated_file)
# Compute and store delta from the list of updated files
updated_file = process_updated(updated_file,
diff['updated'],
backup_dir.path,
last_sv)
if updated_file:
files.append(updated_file)
if diff['created'] or diff['updated']:
sigvault_file = FileFinder.make_key('sigvault',
backup_key,
backup_date)
sigvault_file = os.path.join(CACHE_PATH, sigvault_file)
new_sv = sigvault.open_vault(sigvault_file, 'w', base_path=backup_dir.path)
for f in itertools.chain(diff['created'], diff['updated']):
new_sv.add(f)
new_sv.close()
files.append(sigvault_file)
files = [{'path': f, 'size': os.path.getsize(f)} for f in files]
total = sum([f['size'] for f in files])
return {'backup_key': backup_key, 'backup_date': backup_date, 'files': files, 'total': total}
def process_created(path, created, base_path):
""" Put new files in a new archive. """
if created:
created_archive = tarfile.open(path, 'w:gz')
for f in created:
f_abs = os.path.join(base_path, f)
created_archive.add(f_abs, arcname=f)
created_archive.close()
return path
def process_updated(path, updated, base_path, sigvault):
""" Process upated files, create a new SigVault if needed,
and create a new archives with delta (from the previous SigVault signatures).
"""
if updated:
updated_archive = tarfile.open(path, 'w:gz')
for f in updated:
f_abs = os.path.join(base_path, f)
delta = librsync.delta(open(f_abs, 'rb'),
sigvault.extract(f))
delta_size = os.fstat(delta.fileno()).st_size
delta_info = tarfile.TarInfo(f)
delta_info.size = delta_size
updated_archive.addfile(delta_info, delta)
updated_archive.close()
return path
def patch_diff(base_path, diff, created_archive=None, updated_archive=None):
# First, we iterate the created files
if diff['created']:
for crtd in diff['created']:
created_tar = tarfile.open(created_archive, 'r:gz')
try:
src_file = created_tar.extractfile(crtd)
abspath = os.path.join(base_path, crtd)
dirname = os.path.dirname(abspath)
# Create directories if they doesn't exist yet
if not os.path.exists(dirname):
os.makedirs(dirname)
# We copy the file from the archive directly to its destination
with open(abspath, 'wb') as f:
shutil.copyfileobj(src_file, f)
except KeyError as exc:
# It means that a file is missing in the archive.
log.exception(exc)
raise Exception("Diff seems corrupted.")
finally:
created_tar.close()
# Next, we iterate updated files in order to patch them
if diff['updated']:
for updtd in diff['updated']:
try:
updated_tar = tarfile.open(updated_archive, 'r:gz')
abspath = os.path.join(base_path, updtd)
# Load the librsync delta
delta_file = updated_tar.extractfile(updtd)
# A tempfile file to store the patched file/result
# before replacing the original
patched = tempfile.NamedTemporaryFile()
# Patch the current version of the file with the delta
# and store the result in the previously created tempfile
with open(abspath, 'rb') as f:
librsync.patch(f, delta_file, patched)
patched.seek(0)
                # Now we replace the original file with the patched version
with open(abspath, 'wb') as f:
shutil.copyfileobj(patched, f)
patched.close()
except KeyError as exc:
# It means that a file is missing in the archive.
log.exception(exc)
raise Exception("Diff seems corrupted.")
finally:
updated_tar.close()
# Then, we iterate the deleted files
for dltd in diff['deleted']:
abspath = os.path.join(base_path, dltd)
if os.path.isfile(abspath):
os.remove(abspath)
# Finally, we iterate the deleted directories
for dltd_drs in diff['deleted_dirs']:
abspath = os.path.join(base_path, dltd_drs)
if os.path.isdir(abspath):
os.rmdir(abspath)
def _extract_dt_from_key(key):
key_date = '.'.join(key.split('.')[-3:-1])
key_dt = datetime.strptime(key_date, '%Y-%m-%dT%H:%M:%S.%f')
return key_date, key_dt
def get_full_and_incremental(key, cache_path=None):
""" From a directory as source, iterate over states files from a full backup,
till the end/or another full backup. The first item is actually the full backup. """
if cache_path is None:
cache_path = tempfile.gettempdir()
_dir = Dir(cache_path)
last_full = _dir.get('{0}.full.*'.format(key), sort_reverse=True, abspath=True)
last_full_date, last_full_dt = _extract_dt_from_key(last_full)
previous_state = FileFinder.check_key('state', key, last_full_dt)
yield last_full, None, last_full_dt
for s_file in _dir.files('{0}.state.*'.format(key)):
s_str = '.'.join(s_file.split('.')[-3:-1])
s_dt = datetime.strptime(s_str, '%Y-%m-%dT%H:%M:%S.%f')
if s_dt > last_full_dt and not FileFinder.check_key('full', key, s_dt):
yield s_file, previous_state, s_dt
previous_state = s_file
def restore_backup(key, dest, cache_path=None):
""" Restore backups given the key to dest using cache_path as source
for state and deltas. """
if cache_path is None:
cache_path = tempfile.gettempdir()
for index, (state_file, previous_state_file, state_dt) in enumerate(get_full_and_incremental(key)):
if index == 0:
# At index == 0, state is the full archive
log.info('Restored full backup ({})'.format(state_dt))
tarfile.open(state_file, 'r:gz').extractall(dest)
else:
with open(state_file, 'rb') as f:
state = json.loads(f.read())
with open(previous_state_file, 'rb') as f:
previous_state = json.loads(f.read())
diff = compute_diff(state, previous_state)
patch_diff(dest, diff,
FileFinder.check_key('created', key, state_dt),
FileFinder.check_key('updated', key, state_dt))
log.info('Patched incremental backup ({})'.format(state_dt))
return dest
def get_full_backups(key, cache_path=None):
if cache_path is None:
cache_path = tempfile.gettempdir()
_dir = Dir(cache_path)
fulls = _dir.files('{0}.full.*'.format(key), sort_reverse=True, abspath=True)
fulls = [_extract_dt_from_key(k)[1] for k in fulls]
return fulls
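

# Minimal usage sketch (illustrative only; the paths and key below are
# hypothetical):
#
#   full_backup('/home/user/documents')          # initial full backup
#   incremental_backup('/home/user/documents')   # later, after changes
#   restore_backup('documents', '/tmp/restore')  # rebuild the tree elsewhere
#
# The backup key is derived from the last component of the source path
# ('documents' above); state files and archives are written to the cache
# directory (~/.cache/bakthat) and the system temp directory.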
| mit | 4,781,952,771,890,564,000 | 33.918495 | 103 | 0.566299 | false |
gxxjjj/QuantEcon.py | quantecon/markov/core.py | 1 | 19386 | r"""
Authors: Chase Coleman, Spencer Lyon, Daisuke Oyama, Tom Sargent,
John Stachurski
Filename: core.py
This file contains some useful objects for handling a finite-state
discrete-time Markov chain.
Definitions and Some Basic Facts about Markov Chains
----------------------------------------------------
Let :math:`\{X_t\}` be a Markov chain represented by an :math:`n \times
n` stochastic matrix :math:`P`. State :math:`i` *has access* to state
:math:`j`, denoted :math:`i \to j`, if :math:`i = j` or :math:`P^k[i, j]
> 0` for some :math:`k = 1, 2, \ldots`; :math:`i` and `j` *communicate*,
denoted :math:`i \leftrightarrow j`, if :math:`i \to j` and :math:`j \to
i`. The binary relation :math:`\leftrightarrow` is an equivalent
relation. A *communication class* of the Markov chain :math:`\{X_t\}`,
or of the stochastic matrix :math:`P`, is an equivalent class of
:math:`\leftrightarrow`. Equivalently, a communication class is a
*strongly connected component* (SCC) in the associated *directed graph*
:math:`\Gamma(P)`, a directed graph with :math:`n` nodes where there is
an edge from :math:`i` to :math:`j` if and only if :math:`P[i, j] > 0`.
The Markov chain, or the stochastic matrix, is *irreducible* if it
admits only one communication class, or equivalently, if
:math:`\Gamma(P)` is *strongly connected*.
A state :math:`i` is *recurrent* if :math:`i \to j` implies :math:`j \to
i`; it is *transient* if it is not recurrent. For any :math:`i, j`
contained in a communication class, :math:`i` is recurrent if and only
if :math:`j` is recurrent. Therefore, recurrence is a property of a
communication class. Thus, a communication class is a *recurrent class*
if it contains a recurrent state. Equivalently, a recurrent class is a
SCC that corresponds to a sink node in the *condensation* of the
directed graph :math:`\Gamma(P)`, where the condensation of
:math:`\Gamma(P)` is a directed graph in which each SCC is replaced with
a single node and there is an edge from one SCC :math:`C` to another SCC
:math:`C'` if :math:`C \neq C'` and some node in :math:`C` has access to
some node in :math:`C'`. A recurrent class is also called a *closed
communication class*. The condensation is acyclic, so that there exists
at least one recurrent class.
For example, if the entries of :math:`P` are all strictly positive, then
the whole state space is a communication class as well as a recurrent
class. (More generally, if there is only one communication class, then
it is a recurrent class.) As another example, consider the stochastic
matrix :math:`P = [[1, 0], [0.5, 0.5]]`. This has two communication
classes, :math:`\{0\}` and :math:`\{1\}`, and :math:`\{0\}` is the only
recurrent class.
A *stationary distribution* of the Markov chain :math:`\{X_t\}`, or of
the stochastic matrix :math:`P`, is a nonnegative vector :math:`x` such
that :math:`x' P = x'` and :math:`x' \mathbf{1} = 1`, where
:math:`\mathbf{1}` is the vector of ones. The Markov chain has a unique
stationary distribution if and only if it has a unique recurrent class.
More generally, each recurrent class has a unique stationary
distribution whose support equals that recurrent class. The set of all
stationary distributions is given by the convex hull of these unique
stationary distributions for the recurrent classes.
A natural number :math:`d` is the *period* of state :math:`i` if it is
the greatest common divisor of all :math:`k`'s such that :math:`P^k[i,
i] > 0`; equivalently, it is the GCD of the lengths of the cycles in
:math:`\Gamma(P)` passing through :math:`i`. For any :math:`i, j`
contained in a communication class, :math:`i` has period :math:`d` if
and only if :math:`j` has period :math:`d`. The *period* of an
irreducible Markov chain (or of an irreducible stochastic matrix) is the
period of any state. We define the period of a general (not necessarily
irreducible) Markov chain to be the least common multiple of the periods
of its recurrent classes, where the period of a recurrent class is the
period of any state in that class. A Markov chain is *aperiodic* if its
period is one. A Markov chain is irreducible and aperiodic if and only
if it is *uniformly ergodic*, i.e., there exists some :math:`m` such
that :math:`P^m[i, j] > 0` for all :math:`i, j` (in this case, :math:`P`
is also called *primitive*).
Suppose that an irreducible Markov chain has period :math:`d`. Fix any
state, say state :math:`0`. For each :math:`m = 0, \ldots, d-1`, let
:math:`S_m` be the set of states :math:`i` such that :math:`P^{kd+m}[0,
i] > 0` for some :math:`k`. These sets :math:`S_0, \ldots, S_{d-1}`
constitute a partition of the state space and are called the *cyclic
classes*. For each :math:`S_m` and each :math:`i \in S_m`, we have
:math:`\sum_{j \in S_{m+1}} P[i, j] = 1`, where :math:`S_d = S_0`.
"""
from __future__ import division
import numpy as np
from scipy import sparse
from fractions import gcd
from .gth_solve import gth_solve
from ..graph_tools import DiGraph
# -Check if Numba is Available- #
from ..util import searchsorted, check_random_state, numba_installed, jit
class MarkovChain(object):
"""
Class for a finite-state discrete-time Markov chain. It stores
useful information such as the stationary distributions, and
communication, recurrent, and cyclic classes, and allows simulation
of state transitions.
Parameters
----------
P : array_like or scipy sparse matrix (float, ndim=2)
The transition matrix. Must be of shape n x n.
Attributes
----------
P : ndarray or scipy.sparse.csr_matrix (float, ndim=2)
See Parameters
stationary_distributions : array_like(float, ndim=2)
Array containing stationary distributions, one for each
recurrent class, as rows.
is_irreducible : bool
Indicate whether the Markov chain is irreducible.
num_communication_classes : int
The number of the communication classes.
communication_classes : list(ndarray(int))
List of numpy arrays containing the communication classes.
num_recurrent_classes : int
The number of the recurrent classes.
recurrent_classes : list(ndarray(int))
List of numpy arrays containing the recurrent classes.
is_aperiodic : bool
Indicate whether the Markov chain is aperiodic.
period : int
The period of the Markov chain.
cyclic_classes : list(ndarray(int))
List of numpy arrays containing the cyclic classes. Defined only
when the Markov chain is irreducible.
Notes
-----
In computing stationary distributions, if the input matrix is a
sparse matrix, internally it is converted to a dense matrix.
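
    Examples
    --------
    Illustrative usage (exact array formatting may differ across NumPy
    versions):

    >>> P = [[0.4, 0.6], [0.2, 0.8]]
    >>> mc = MarkovChain(P)
    >>> mc.is_irreducible
    True
    >>> mc.stationary_distributions
    array([[ 0.25,  0.75]])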
"""
def __init__(self, P):
if sparse.issparse(P): # Sparse matrix
self.P = sparse.csr_matrix(P)
self.is_sparse = True
else: # Dense matrix
self.P = np.asarray(P)
self.is_sparse = False
# Check Properties
# Double check that P is a square matrix
if len(self.P.shape) != 2 or self.P.shape[0] != self.P.shape[1]:
raise ValueError('P must be a square matrix')
# The number of states
self.n = self.P.shape[0]
# Double check that P is a nonnegative matrix
if not self.is_sparse:
data_nonnegative = (self.P >= 0) # ndarray
else:
data_nonnegative = (self.P.data >= 0) # csr_matrx
if not np.all(data_nonnegative):
raise ValueError('P must be nonnegative')
# Double check that the rows of P sum to one
row_sums = self.P.sum(axis=1)
if self.is_sparse: # row_sums is np.matrix (ndim=2)
row_sums = row_sums.getA1()
if not np.allclose(row_sums, np.ones(self.n)):
raise ValueError('The rows of P must sum to 1')
# To analyze the structure of P as a directed graph
self._digraph = None
self._stationary_dists = None
self._cdfs = None # For dense matrix
self._cdfs1d = None # For sparse matrix
def __repr__(self):
msg = "Markov chain with transition matrix \nP = \n{0}"
if self._stationary_dists is None:
return msg.format(self.P)
else:
msg = msg + "\nand stationary distributions \n{1}"
return msg.format(self.P, self._stationary_dists)
def __str__(self):
        return self.__repr__()
@property
def digraph(self):
if self._digraph is None:
self._digraph = DiGraph(self.P)
return self._digraph
@property
def is_irreducible(self):
return self.digraph.is_strongly_connected
@property
def num_communication_classes(self):
return self.digraph.num_strongly_connected_components
@property
def communication_classes(self):
return self.digraph.strongly_connected_components
@property
def num_recurrent_classes(self):
return self.digraph.num_sink_strongly_connected_components
@property
def recurrent_classes(self):
return self.digraph.sink_strongly_connected_components
@property
def is_aperiodic(self):
if self.is_irreducible:
return self.digraph.is_aperiodic
else:
return self.period == 1
@property
def period(self):
if self.is_irreducible:
return self.digraph.period
else:
rec_classes = self.recurrent_classes
# Determine the period, the LCM of the periods of rec_classes
d = 1
for rec_class in rec_classes:
period = self.digraph.subgraph(rec_class).period
d = (d * period) // gcd(d, period)
return d
@property
def cyclic_classes(self):
if not self.is_irreducible:
raise NotImplementedError(
'Not defined for a reducible Markov chain'
)
else:
return self.digraph.cyclic_components
def _compute_stationary(self):
"""
        Store the stationary distributions in self._stationary_dists.
"""
if self.is_irreducible:
if not self.is_sparse: # Dense
stationary_dists = gth_solve(self.P).reshape(1, self.n)
else: # Sparse
stationary_dists = \
gth_solve(self.P.toarray(),
overwrite=True).reshape(1, self.n)
else:
rec_classes = self.recurrent_classes
stationary_dists = np.zeros((len(rec_classes), self.n))
for i, rec_class in enumerate(rec_classes):
if not self.is_sparse: # Dense
stationary_dists[i, rec_class] = \
gth_solve(self.P[rec_class, :][:, rec_class])
else: # Sparse
stationary_dists[i, rec_class] = \
gth_solve(self.P[rec_class, :][:, rec_class].toarray(),
overwrite=True)
self._stationary_dists = stationary_dists
@property
def stationary_distributions(self):
if self._stationary_dists is None:
self._compute_stationary()
return self._stationary_dists
@property
def cdfs(self):
if (self._cdfs is None) and not self.is_sparse:
# See issue #137#issuecomment-96128186
cdfs = np.empty((self.n, self.n), order='C')
np.cumsum(self.P, axis=-1, out=cdfs)
self._cdfs = cdfs
return self._cdfs
@property
def cdfs1d(self):
if (self._cdfs1d is None) and self.is_sparse:
data = self.P.data
indices = self.P.indices
indptr = self.P.indptr
cdfs1d = np.empty(self.P.nnz, order='C')
for i in range(self.n):
cdfs1d[indptr[i]:indptr[i+1]] = \
data[indptr[i]:indptr[i+1]].cumsum()
self._cdfs1d = cdfs1d
return self._cdfs1d
def simulate(self, ts_length, init=None, num_reps=None, random_state=None):
"""
Simulate time series of state transitions.
Parameters
----------
ts_length : scalar(int)
Length of each simulation.
init : scalar(int) or array_like(int, ndim=1),
optional(default=None)
Initial state(s). If None, the initial state is randomly
drawn.
num_reps : scalar(int), optional(default=None)
Number of repetitions of simulation.
random_state : scalar(int) or np.random.RandomState,
optional(default=None)
Random seed (integer) or np.random.RandomState instance to
set the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState
is used.
Returns
-------
X : ndarray(int, ndim=1 or 2)
Array containing the sample path(s), of shape (ts_length,)
if init is a scalar (integer) or None and num_reps is None;
of shape (k, ts_length) otherwise, where k = len(init) if
(init, num_reps) = (array, None), k = num_reps if (init,
num_reps) = (int or None, int), and k = len(init)*num_reps
if (init, num_reps) = (array, int).
"""
random_state = check_random_state(random_state)
dim = 1 # Dimension of the returned array: 1 or 2
try:
k = len(init) # init is an array
dim = 2
init_states = np.asarray(init, dtype=int)
if num_reps is not None:
k *= num_reps
init_states = np.tile(init_states, num_reps)
except TypeError: # init is a scalar(int) or None
k = 1
if num_reps is not None:
dim = 2
k = num_reps
if init is None:
init_states = random_state.randint(self.n, size=k)
elif isinstance(init, int):
init_states = np.ones(k, dtype=int) * init
else:
raise ValueError(
'init must be int, array_like of ints, or None'
)
# === set up array to store output === #
X = np.empty((k, ts_length), dtype=int)
# Random values, uniformly sampled from [0, 1)
random_values = random_state.random_sample(size=(k, ts_length-1))
# Generate sample paths and store in X
if not self.is_sparse: # Dense
_generate_sample_paths(
self.cdfs, init_states, random_values, out=X
)
else: # Sparse
_generate_sample_paths_sparse(
self.cdfs1d, self.P.indices, self.P.indptr, init_states,
random_values, out=X
)
if dim == 1:
return X[0]
else:
return X
def _generate_sample_paths(P_cdfs, init_states, random_values, out):
"""
Generate num_reps sample paths of length ts_length, where num_reps =
out.shape[0] and ts_length = out.shape[1].
Parameters
----------
P_cdfs : ndarray(float, ndim=2)
Array containing as rows the CDFs of the state transition.
init_states : array_like(int, ndim=1)
Array containing the initial states. Its length must be equal to
num_reps.
random_values : ndarray(float, ndim=2)
Array containing random values from [0, 1). Its shape must be
equal to (num_reps, ts_length-1)
out : ndarray(int, ndim=2)
Array to store the sample paths.
Notes
-----
    This routine is jit-compiled if the Numba module is available.
"""
num_reps, ts_length = out.shape
for i in range(num_reps):
out[i, 0] = init_states[i]
for t in range(ts_length-1):
out[i, t+1] = searchsorted(P_cdfs[out[i, t]], random_values[i, t])
if numba_installed:
_generate_sample_paths = jit(nopython=True)(_generate_sample_paths)
def _generate_sample_paths_sparse(P_cdfs1d, indices, indptr, init_states,
random_values, out):
"""
For sparse matrix.
Generate num_reps sample paths of length ts_length, where num_reps =
out.shape[0] and ts_length = out.shape[1].
Parameters
----------
P_cdfs1d : ndarray(float, ndim=1)
1D array containing the CDFs of the state transition.
indices : ndarray(int, ndim=1)
CSR format index array.
indptr : ndarray(int, ndim=1)
CSR format index pointer array.
init_states : array_like(int, ndim=1)
Array containing the initial states. Its length must be equal to
num_reps.
random_values : ndarray(float, ndim=2)
Array containing random values from [0, 1). Its shape must be
equal to (num_reps, ts_length-1)
out : ndarray(int, ndim=2)
Array to store the sample paths.
Notes
-----
    This routine is jit-compiled if the Numba module is available.
"""
num_reps, ts_length = out.shape
for i in range(num_reps):
out[i, 0] = init_states[i]
for t in range(ts_length-1):
k = searchsorted(P_cdfs1d[indptr[out[i, t]]:indptr[out[i, t]+1]],
random_values[i, t])
out[i, t+1] = indices[indptr[out[i, t]]+k]
if numba_installed:
_generate_sample_paths_sparse = \
jit(nopython=True)(_generate_sample_paths_sparse)
def mc_compute_stationary(P):
"""
Computes stationary distributions of P, one for each recurrent
class. Any stationary distribution is written as a convex
combination of these distributions.
Returns
-------
stationary_dists : array_like(float, ndim=2)
Array containing the stationary distributions as its rows.
"""
return MarkovChain(P).stationary_distributions
def mc_sample_path(P, init=0, sample_size=1000, random_state=None):
"""
Generates one sample path from the Markov chain represented by
(n x n) transition matrix P on state space S = {{0,...,n-1}}.
Parameters
----------
P : array_like(float, ndim=2)
A Markov transition matrix.
    init : array_like(float, ndim=1) or scalar(int), optional(default=0)
        If init is an array_like, then it is treated as the initial
        distribution across states. If init is a scalar, then it is
        treated as the deterministic initial state.
sample_size : scalar(int), optional(default=1000)
The length of the sample path.
random_state : scalar(int) or np.random.RandomState,
optional(default=None)
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
X : array_like(int, ndim=1)
The simulation of states.
"""
random_state = check_random_state(random_state)
if isinstance(init, int):
X_0 = init
else:
cdf0 = np.cumsum(init)
u_0 = random_state.random_sample()
X_0 = searchsorted(cdf0, u_0)
mc = MarkovChain(P)
return mc.simulate(ts_length=sample_size, init=X_0,
random_state=random_state)
| bsd-3-clause | -8,939,832,694,485,968,000 | 35.033457 | 79 | 0.615444 | false |
JNPRAutomate/pyCliConf | examples/ztp-full.py | 1 | 13115 | #!/usr/bin/python -tt
import sys
import subprocess
from datetime import datetime
# Jijna2 is not supported until Junos 14.1X53, so catch exception on versions without this library.
try:
from jinja2 import Template
JINJA_SUPPORT = True
except:
JINJA_SUPPORT = False
class CliConf():
"""
CliConf
This class is designed to be used for configuring a Junos device
using only the libraries supported by the Junos Enhanced Automation
images.
Args:
:Debug: Ensure log() method prints output to stdout and logfile. Defaults to False, and all log() output only goes to logfile.
:logfile: Destination logfile for log() method. Defaults to "/var/root/ztp-log.txt" as this is a persistant writable location during ZTP.
Examples:
Basic device connection:
.. code-block:: python
from pyCliConf import CliConf
dev = CliConf()
dev.load_config(config_file = "/var/tmp/set.cfg", action = "set")
dev.commit()
dev.close()
NOTE: When committing configuration with this script, please ensure that "chassis auto-image-upgrade" is in the configuration, otherwise "Auto Image Upgrade" process will exit and mark the script as a failure.
"""
def __init__(self, logfile="/var/root/ztp-log.txt", Debug=False):
self.session = " "
self.logfile = open(logfile, "a", 0)
self.debug = Debug
try:
self.session = subprocess.Popen(['/usr/sbin/cli', 'xml-mode', 'netconf'], stdin=subprocess.PIPE, stdout=self.logfile, stderr=self.logfile)
except Exception as err:
print "RPC Session Error: %r \n\t Are you on Junos?\n" % err
def close(self):
"""
Close a NETCONF session.
"""
rpc_close = """
<rpc>
<close-session/>
</rpc>
]]>]]>
"""
try:
self.rpc(rpc_close)
except Exception as err:
errmsg = "RPC Close Error: %r" % err
self.log(errmsg)
try:
self.logfile.close()
except Exception as err:
errmsg = "Error closing logfile: %r" % err
self.log(errmsg)
def commit(self):
"""
Commit current candidate configuration
NOTE: When committing configuration with this script, please ensure that "chassis auto-image-upgrade" is in the configuration, otherwise "Auto Image Upgrade" process will exit and mark the script as a failure.
"""
rpc_commit = """
<rpc>
<commit/>
</rpc>
]]>]]>
"""
try:
self.rpc(rpc_commit)
except Exception as err:
errmsg = "RPC Commit Error: %r" % err
self.log(errmsg)
def install_package(self, url, no_copy=True, no_validate=True, unlink = True, reboot=False):
"""
Install Junos package onto the system.
The primary use case is for performing Junos upgrades during the
ZTP process, though any package should work.
NOTE: "reboot" is set by default to "False", to ensure you do not
surprise yourself.
Args:
:url: string containing Junos standard URL scheme:
- File path (eg /var/tmp/config.cfg)
- FTP (eg ftp://username:password@hostname/path/filename)
- HTTP (eg http://username:password@hostname/path/filename)
:no_copy: Defaults to True
:no_validate: Defaults to True
:unlink: Defaults to True
:reboot: Defaults to False
Example:
.. code-block:: python
from pyCliConf import CliConf
dev = CliConf()
dev.install_package("http://172.32.32.254/jinstall-X.Y.tgz", reboot=True)
dev.close()
"""
if no_copy:
rpc_package_nocopy = "<no-copy/>"
else:
rpc_package_nocopy = ""
if no_validate:
rpc_package_novalidate = "<no-validate/>"
else:
rpc_package_novalidate = ""
if unlink:
rpc_package_unlink = "<unlink/>"
else:
rpc_package_unlink = ""
if reboot:
rpc_package_reboot = "<reboot/>"
else:
rpc_package_reboot = ""
rpc_package = """
<rpc>
<request-package-add>
<package-name>
%s
</package-name>
%s
%s
%s
%s
</request-package-add>
</rpc>
]]>]]>
""" % (url, rpc_package_nocopy, rpc_package_novalidate, rpc_package_unlink, rpc_package_reboot)
rpc_send = rpc_package
try:
self.rpc(rpc_send)
except Exception as err:
errmsg = "Install Package Error: %r" % err
self.log(errmsg)
def load_config(self, cfg_string=False, url=False, cfg_format="text", action="merge"):
"""
Loads Junos configuration from a URL or file location
Args:
:cfg_string: string containing valid Junos configuration syntax
:url: string containing Junos standard URL scheme:
- File path (eg /var/tmp/config.cfg)
- FTP (eg ftp://username:password@hostname/path/filename)
- HTTP (eg http://username:password@hostname/path/filename)
:cfg_format: string containing format of config url or cfg_string
- "text" (eg Junos { } format)
- "xml" (eg XML structured format)
- "set" (eg "set system host-name foo")
:action: string telling Junos how to implement new configuration
in relation to existing configuration on the device.
- 'set'
- 'merge'
                - 'override'
- 'replace'
- 'update'
Examples:
Load configuration from local file using "set" format:
.. code-block:: python
from pyCliConf import CliConf
dev = CliConf()
dev.load_config(config_file = "/var/tmp/set.cfg", action = "set")
NOTE: When committing configuration with this script, please ensure that "chassis auto-image-upgrade" is in the configuration, otherwise "Auto Image Upgrade" process will exit and mark the script as a failure.
"""
try:
cfg_string
url
except Exception as err:
errmsg = "Error: load_config needs either 'cfg_string' or 'url' defined: %r" % err
self.log(errmsg)
if action == "set" or cfg_format == "set":
action_string = ' action = "set" '
cfg_format = "text"
        elif action in ['merge', 'override', 'replace', 'update']:
action_string = ' action = "%s" ' % action
else:
raise Exception("RPC Load Error - Unknown action type")
rpc_load_url = """
<rpc>
<load-configuration url="%s"%sformat="%s" />
</rpc>
]]>]]>
""" % (url, action_string, cfg_format)
rpc_load_text_string = """
<rpc>
<load-configuration%sformat="text">
<configuration-text>
%s
</configuration-text>
</load-configuration>
</rpc>
]]>]]>
""" % (action_string, cfg_string)
rpc_load_set_string = """
<rpc>
<load-configuration%sformat="text">
<configuration-set>
%s
</configuration-set>
</load-configuration>
</rpc>
]]>]]>
""" % (action_string, cfg_string)
rpc_load_string = """
<rpc>
<load-configuration%sformat="xml">
<configuration>
%s
</configuration>
</load-configuration>
</rpc>
]]>]]>
""" % (action_string, cfg_string)
if url:
rpc_send = rpc_load_url
elif action == "set" or cfg_format == "set":
rpc_send = rpc_load_set_string
elif cfg_format == "text":
rpc_send = rpc_load_text_string
elif cfg_format == "xml":
rpc_send = rpc_load_string
try:
self.rpc(rpc_send)
except Exception as err:
errmsg = "RPC Load Error: %r" % err
self.log(errmsg)
def load_config_template(self, template, template_vars, cfg_format="text", action="merge"):
"""
:template: A templated string using Jinja2 templates
:template_vars: A dict containing the vars used in the :template: string
:cfg_format: The type of configuration to load. The default is "text" or a standard Junos config block. Other options are: "set" for set style commands, "xml" for xml configs
:action: Configurtion action. The default is "merge".
Uses standard `Jinja2`_ Templating.
.. _`Jinja2`: http://jinja.pocoo.org/
Example:
.. code-block:: python
from pyCliConf import CliConf
config_template = "system { host-name {{ hostname }}-{{ suffix }}; }"
config_vars = {"hostname": "foo", "suffix": "bah"}
dev = CliConf()
dev.load_config_template(config_template, config_vars)
dev.commit()
dev.close()
NOTE: When committing configuration with this script, please ensure that "chassis auto-image-upgrade" is in the configuration, otherwise "Auto Image Upgrade" process will exit and mark the script as a failure.
"""
if JINJA_SUPPORT == True:
try:
new_template = Template(template)
except Exception as err:
errmsg = "Load_Template New Error: %r" % err
self.log(errmsg)
try:
final_template = new_template.render(template_vars)
except Exception as err:
errmsg = "Load_Template Render Error: %r" % err
self.log(errmsg)
try:
self.load_config(cfg_string=final_template, cfg_format=cfg_format, action=action)
except Exception as err:
errmsg = "RPC Load_Template Send Error: %r" % err
self.log(errmsg)
else:
self.log("Jinja2 Template supported on this software version. First support Junos 14.1X53")
def log(self, msg):
"""
Basic logging function for use by script.
"""
logfile = self.logfile
log_time = self.time()
if self.debug == True:
print(str(log_time) + ": " + str(msg) + "\n")
try:
logfile.write(str(log_time) + ": " + str(msg) + "\n")
except Exception as err:
print "Error logging to file: %r" % err
def reboot(self):
"""
Reboot the device.
"""
rpc_reboot = """
<rpc>
<request-reboot>
</request-reboot>
</rpc>
]]>]]>
"""
try:
self.rpc(rpc_reboot)
except Exception as err:
errmsg = "RPC Reboot Error: %r" % err
self.log(errmsg)
def rpc(self, rpc):
"""
        Sends an RPC over the NETCONF session opened via the CLI in __init__.
Primarily used by other methods.
Args:
:rpc: string containing properly structured NETCONF RPC
"""
try:
log_string = "RPC Data Sent to host:\n %r" % rpc
self.log(log_string)
self.session.stdin.write(rpc)
except Exception as err:
errmsg = "RPC Communication Error: %r" % err
self.log(errmsg)
def time(self):
"""
Basic Time Function for log function use.
"""
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
##############################################################################
# #
# Start Unique Code #
# #
##############################################################################
JUNOS_INSTALL = "http://172.32.32.254/jinstall-qfx-5-flex-14.1X53-D15.2-domestic-signed.tgz"
NEW_CONFIG = """
set system root-authentication encrypted-password "$1$e/sfN/6e$OuvCNcutoPYkl8S19xh/Q/"
set system ntp server 1.1.1.1
set system services netconf ssh
set system host-name ztp-provision-complete-preX53
set system name-server 8.8.8.8
set routing-options static route 0.0.0.0/0 qualified-next-hop 172.32.32.2
"""
dev = CliConf()
dev.log("Loading configuration file")
dev.load_config(cfg_string = NEW_CONFIG, action = "set")
dev.log("Before Commit")
dev.commit()
dev.log("After Commit")
dev.log("Upgrading Junos version")
#dev.install_package(JUNOS_INSTALL, reboot = True)
dev.close()
sys.exit(0)
| apache-2.0 | -7,802,633,541,436,980,000 | 31.705736 | 217 | 0.53069 | false |
garbersc/keras-galaxies | solutionToClass.py | 1 | 1410 | import numpy as np
import os
import csv
with open(TRAIN_LABELS_PATH, 'r') as f:
reader = csv.reader(f, delimiter=",")
train_ids = []
for k, line in enumerate(reader):
if k == 0: continue # skip header
train_ids.append(int(line[0]))
isEndClass = np.asarray([0, 0, 1,
                         0, 0,
                         0, 0,
                         0, 0,
                         0, 0, 0, 0,
                         0, 1,
                         0, 0, 0,
                         1, 1, 1, 1, 1, 1, 1,
                         0, 0, 0,
                         0, 0, 0,
                         1, 1, 1, 1, 1, 1])
d = pd.read_csv(TRAIN_LABELS_PATH)
targets = d.as_matrix()[:, 1:].astype('float32')
classes = np.argmax(np.multiply(targets, isEndClass), axis=1)  # per-row argmax over the leaf classes
TRAIN_IDS_PATH = "data/train_ids.npy"
# TRAIN_LABELS_PATH = "data/raw/solutions_training.csv"
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"
import numpy as np
import os
import csv
with open(TRAIN_LABELS_PATH, 'r') as f:
reader = csv.reader(f, delimiter=",")
train_ids = []
for k, line in enumerate(reader):
if k == 0: continue # skip header
train_ids.append(int(line[0]))
train_ids = np.array(train_ids)
print "Saving %s" % TRAIN_IDS_PATH
np.save(TRAIN_IDS_PATH, train_ids)
# TRAIN_LABELS_PATH = "data/raw/solutions_training.csv"
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"
TARGET_PATH = "data/solutions_train.npy"
import pandas as pd
import numpy as np
d = pd.read_csv(TRAIN_LABELS_PATH)
targets = d.as_matrix()[:, 1:].astype('float32')
print "Saving %s" % TARGET_PATH
np.save(TARGET_PATH, targets)
| bsd-3-clause | -187,993,293,303,596,700 | 20.692308 | 58 | 0.648227 | false |
unreal666/outwiker | src/outwiker/core/iconmaker.py | 3 | 1151 | # -*- coding: utf-8 -*-
from PIL import Image
from outwiker.core.defines import ICON_WIDTH, ICON_HEIGHT
class IconMaker(object):
""" Class for creation icons by images. """
def create(self, fname_in, fname_out):
""" Create icon by file fname_in. Result will have saved as fname_out.
"""
img_new = Image.new('RGBA', (ICON_WIDTH, ICON_HEIGHT))
img_src = Image.open(fname_in)
# Resize source imaga, if it is required
width_src, height_src = img_src.size
scale = max(float(width_src) / float(ICON_WIDTH), float(height_src) /
float(ICON_HEIGHT))
if scale > 1:
img_src = img_src.resize((int(width_src / scale),
int(height_src / scale)),
Image.ANTIALIAS)
# Paste source image to result image
dx = int((ICON_WIDTH - img_src.size[0]) / 2.0)
dy = int((ICON_HEIGHT - img_src.size[1]) / 2.0)
assert dx >= 0 and dx < ICON_WIDTH
assert dy >= 0 and dy < ICON_HEIGHT
img_new.paste(img_src, (dx, dy))
img_new.save(fname_out)
| gpl-3.0 | 5,693,076,158,499,590,000 | 33.878788 | 78 | 0.546481 | false |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/pylint/test/unittest_checker_python3.py | 1 | 11459 | # Copyright 2014 Google Inc.
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Tests for the python3 checkers."""
from __future__ import absolute_import
import sys
import unittest
import textwrap
from astroid import test_utils
from pylint import testutils
from pylint.checkers import python3 as checker
def python2_only(test):
"""Decorator for any tests that will fail under Python 3."""
return unittest.skipIf(sys.version_info[0] > 2, 'Python 2 only')(test)
# TODO(cpopa): Port these to the functional test framework instead.
class Python3CheckerTest(testutils.CheckerTestCase):
CHECKER_CLASS = checker.Python3Checker
def check_bad_builtin(self, builtin_name):
node = test_utils.extract_node(builtin_name + ' #@')
message = builtin_name.lower() + '-builtin'
with self.assertAddsMessages(testutils.Message(message, node=node)):
self.checker.visit_name(node)
@python2_only
def test_bad_builtins(self):
builtins = [
'apply',
'buffer',
'cmp',
'coerce',
'execfile',
'file',
'long',
'raw_input',
'reduce',
'StandardError',
'unicode',
'xrange',
'reload',
]
for builtin in builtins:
self.check_bad_builtin(builtin)
def _test_defined_method(self, method, warning):
node = test_utils.extract_node("""
class Foo(object):
def __{0}__(self, other): #@
pass""".format(method))
message = testutils.Message(warning, node=node)
with self.assertAddsMessages(message):
self.checker.visit_function(node)
def test_delslice_method(self):
self._test_defined_method('delslice', 'delslice-method')
def test_getslice_method(self):
self._test_defined_method('getslice', 'getslice-method')
def test_setslice_method(self):
self._test_defined_method('setslice', 'setslice-method')
def test_coerce_method(self):
self._test_defined_method('coerce', 'coerce-method')
def test_oct_method(self):
self._test_defined_method('oct', 'oct-method')
def test_hex_method(self):
self._test_defined_method('hex', 'hex-method')
def test_nonzero_method(self):
self._test_defined_method('nonzero', 'nonzero-method')
def test_cmp_method(self):
self._test_defined_method('cmp', 'cmp-method')
@python2_only
def test_print_statement(self):
node = test_utils.extract_node('print "Hello, World!" #@')
message = testutils.Message('print-statement', node=node)
with self.assertAddsMessages(message):
self.checker.visit_print(node)
@python2_only
def test_backtick(self):
node = test_utils.extract_node('`test`')
message = testutils.Message('backtick', node=node)
with self.assertAddsMessages(message):
self.checker.visit_backquote(node)
def test_relative_import(self):
node = test_utils.extract_node('import string #@')
message = testutils.Message('no-absolute-import', node=node)
with self.assertAddsMessages(message):
self.checker.visit_import(node)
def test_relative_from_import(self):
node = test_utils.extract_node('from os import path #@')
message = testutils.Message('no-absolute-import', node=node)
with self.assertAddsMessages(message):
self.checker.visit_import(node)
def test_absolute_import(self):
module_import = test_utils.build_module(
'from __future__ import absolute_import; import os')
module_from = test_utils.build_module(
'from __future__ import absolute_import; from os import path')
with self.assertNoMessages():
for module in (module_import, module_from):
self.walk(module)
def test_division(self):
node = test_utils.extract_node('3 / 2 #@')
message = testutils.Message('old-division', node=node)
with self.assertAddsMessages(message):
self.checker.visit_binop(node)
def test_division_with_future_statement(self):
module = test_utils.build_module('from __future__ import division; 3 / 2')
with self.assertNoMessages():
self.walk(module)
def test_floor_division(self):
node = test_utils.extract_node(' 3 // 2 #@')
with self.assertNoMessages():
self.checker.visit_binop(node)
def test_division_by_float(self):
left_node = test_utils.extract_node('3.0 / 2 #@')
right_node = test_utils.extract_node(' 3 / 2.0 #@')
with self.assertNoMessages():
for node in (left_node, right_node):
self.checker.visit_binop(node)
def test_dict_iter_method(self):
for meth in ('keys', 'values', 'items'):
node = test_utils.extract_node('x.iter%s() #@' % meth)
message = testutils.Message('dict-iter-method', node=node)
with self.assertAddsMessages(message):
self.checker.visit_callfunc(node)
def test_dict_iter_method_on_dict(self):
node = test_utils.extract_node('{}.iterkeys()')
message = testutils.Message('dict-iter-method', node=node)
with self.assertAddsMessages(message):
self.checker.visit_callfunc(node)
def test_dict_not_iter_method(self):
arg_node = test_utils.extract_node('x.iterkeys(x) #@')
stararg_node = test_utils.extract_node('x.iterkeys(*x) #@')
kwarg_node = test_utils.extract_node('x.iterkeys(y=x) #@')
non_dict_node = test_utils.extract_node('x=[]\nx.iterkeys() #@')
with self.assertNoMessages():
for node in (arg_node, stararg_node, kwarg_node, non_dict_node):
self.checker.visit_callfunc(node)
def test_dict_view_method(self):
for meth in ('keys', 'values', 'items'):
node = test_utils.extract_node('x.view%s() #@' % meth)
message = testutils.Message('dict-view-method', node=node)
with self.assertAddsMessages(message):
self.checker.visit_callfunc(node)
def test_dict_view_method_on_dict(self):
node = test_utils.extract_node('{}.viewkeys()')
message = testutils.Message('dict-view-method', node=node)
with self.assertAddsMessages(message):
self.checker.visit_callfunc(node)
def test_dict_not_view_method(self):
arg_node = test_utils.extract_node('x.viewkeys(x) #@')
stararg_node = test_utils.extract_node('x.viewkeys(*x) #@')
kwarg_node = test_utils.extract_node('x.viewkeys(y=x) #@')
non_dict_node = test_utils.extract_node('x=[]\nx.viewkeys() #@')
with self.assertNoMessages():
for node in (arg_node, stararg_node, kwarg_node, non_dict_node):
self.checker.visit_callfunc(node)
def test_next_method(self):
node = test_utils.extract_node('x.next() #@')
message = testutils.Message('next-method-called', node=node)
with self.assertAddsMessages(message):
self.checker.visit_callfunc(node)
@python2_only
def test_implicit_map_evaluation(self):
node = test_utils.extract_node('map(str, [1, 2, 3])')
discard = node.parent
message = testutils.Message('implicit-map-evaluation', node=discard)
with self.assertAddsMessages(message):
# Use node.parent because extract_node returns the value
# of a discard node, not the discard itself.
self.checker.visit_discard(discard)
def test_not_next_method(self):
arg_node = test_utils.extract_node('x.next(x) #@')
stararg_node = test_utils.extract_node('x.next(*x) #@')
kwarg_node = test_utils.extract_node('x.next(y=x) #@')
with self.assertNoMessages():
for node in (arg_node, stararg_node, kwarg_node):
self.checker.visit_callfunc(node)
def test_metaclass_assignment(self):
node = test_utils.extract_node("""
class Foo(object): #@
__metaclass__ = type""")
message = testutils.Message('metaclass-assignment', node=node)
with self.assertAddsMessages(message):
self.checker.visit_class(node)
def test_metaclass_global_assignment(self):
module = test_utils.build_module('__metaclass__ = type')
with self.assertNoMessages():
self.walk(module)
@python2_only
def test_parameter_unpacking(self):
node = test_utils.extract_node('def func((a, b)):#@\n pass')
arg = node.args.args[0]
with self.assertAddsMessages(testutils.Message('parameter-unpacking', node=arg)):
self.checker.visit_arguments(node.args)
@python2_only
def test_old_raise_syntax(self):
node = test_utils.extract_node('raise Exception, "test"')
message = testutils.Message('old-raise-syntax', node=node)
with self.assertAddsMessages(message):
self.checker.visit_raise(node)
@python2_only
def test_raising_string(self):
node = test_utils.extract_node('raise "Test"')
message = testutils.Message('raising-string', node=node)
with self.assertAddsMessages(message):
self.checker.visit_raise(node)
@python2_only
def test_checker_disabled_by_default(self):
node = test_utils.build_module(textwrap.dedent("""
abc = 1l
raise Exception, "test"
raise "test"
`abc`
"""))
with self.assertNoMessages():
self.walk(node)
@python2_only
class Python3TokenCheckerTest(testutils.CheckerTestCase):
CHECKER_CLASS = checker.Python3TokenChecker
def _test_token_message(self, code, symbolic_message):
tokens = testutils.tokenize_str(code)
message = testutils.Message(symbolic_message, line=1)
with self.assertAddsMessages(message):
self.checker.process_tokens(tokens)
def test_long_suffix(self):
for code in ("1l", "1L"):
self._test_token_message(code, 'long-suffix')
def test_old_ne_operator(self):
self._test_token_message("1 <> 2", "old-ne-operator")
def test_old_octal_literal(self):
for octal in ("045", "055", "075", "077", "076543"):
self._test_token_message(octal, "old-octal-literal")
# Make sure we are catching only octals.
for non_octal in ("45", "00", "085", "08", "1"):
tokens = testutils.tokenize_str(non_octal)
with self.assertNoMessages():
self.checker.process_tokens(tokens)
if __name__ == '__main__':
unittest.main()
| mit | -2,221,897,753,633,817,900 | 37.456376 | 89 | 0.620909 | false |
hofschroeer/shinysdr | shinysdr/test/test_telemetry.py | 1 | 4109 | # Copyright 2015 Kevin Reid <[email protected]>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division
from twisted.internet.task import Clock
from twisted.trial import unittest
from zope.interface import implements
from shinysdr.telemetry import ITelemetryMessage, ITelemetryObject, TelemetryItem, TelemetryStore, Track, empty_track
class TestTrack(unittest.TestCase):
def test_init_from_partial_json(self):
self.assertEquals(
empty_track._replace(
latitude=TelemetryItem(1, 1000),
longitude=TelemetryItem(2, 1000)),
Track({
u'latitude': {u'value': 1, u'timestamp': 1000},
u'longitude': {u'value': 2, u'timestamp': 1000},
}))
class TestTelemetryStore(unittest.TestCase):
def setUp(self):
self.clock = Clock()
self.clock.advance(1000)
self.store = TelemetryStore(time_source=self.clock)
def test_new_object(self):
self.assertEqual([], self.store.state().keys())
self.store.receive(Msg('foo', 1000))
self.assertEqual(['foo'], self.store.state().keys())
obj = self.store.state()['foo'].get()
self.assertIsInstance(obj, Obj)
def test_receive_called(self):
self.store.receive(Msg('foo', 1000, 1))
obj = self.store.state()['foo'].get()
self.assertEquals(obj.last_msg, 1)
self.store.receive(Msg('foo', 1000, 2))
self.assertEquals(obj.last_msg, 2)
def test_drop_old(self):
self.store.receive(Msg('foo', 1000))
self.assertEqual(['foo'], self.store.state().keys())
self.clock.advance(1799.5)
self.store.receive(Msg('bar', 2799.5))
self.assertEqual({'bar', 'foo'}, set(self.store.state().keys()))
self.clock.advance(0.5)
self.store.receive(Msg('bar', 2800))
self.assertEqual(['bar'], self.store.state().keys())
def test_become_interesting(self):
self.store.receive(Msg('foo', 1000, 'boring'))
self.assertEqual([], self.store.state().keys())
self.store.receive(Msg('foo', 1001, 'interesting'))
self.assertEqual(['foo'], self.store.state().keys())
# 'become boring' is not implemented, so also not tested yet
def test_drop_old_boring(self):
'''
Make sure that dropping a boring object doesn't fail.
'''
self.store.receive(Msg('foo', 1000, 'boring'))
self.assertEqual([], self.store.state().keys())
self.clock.advance(1800)
self.store.receive(Msg('bar', 2800, 'boring'))
self.assertEqual([], self.store.state().keys())
class Msg(object):
implements(ITelemetryMessage)
def __init__(self, object_id, timestamp, value='no value'):
self.__id = object_id
self.timestamp = timestamp
self.value = value
def get_object_id(self):
return self.__id
def get_object_constructor(self):
return Obj
class Obj(object):
implements(ITelemetryObject)
def __init__(self, object_id):
self.__id = object_id
self.last_msg = 'no message'
self.last_time = None
def receive(self, message):
self.last_msg = message.value
self.last_time = message.timestamp
def is_interesting(self):
return self.last_msg != 'boring'
def get_object_expiry(self):
return self.last_time + 1800
| gpl-3.0 | 1,781,579,853,708,462,800 | 33.822034 | 117 | 0.627403 | false |
lyshie/afc-github | app/__init__.py | 1 | 6042 | # -*- coding: utf-8 -*-
from flask import Flask, request, session
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask_babel import Babel
from flask_babel import gettext as _
import logging
import os
from distutils.util import strtobool
from werkzeug.contrib.fixers import ProxyFix
import pytz
import locale
import calendar
import re
'''
Application configs
'''
app = Flask(__name__, static_url_path="")
app.wsgi_app = ProxyFix(app.wsgi_app)
# default config file
app.config.from_object("config")
# override, private config
app.config.from_pyfile("config.py")
if strtobool(os.environ.get("SQLALCHEMY_LOG", "False")):
sql_logger = logging.getLogger('sqlalchemy.engine')
sql_logger.setLevel(logging.INFO)
loggers = [sql_logger]
for logger in loggers:
for handler in app.logger.handlers:
logger.addHandler(handler)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
login_manager.login_message = app.config['LOGIN_MESSAGE']
csrf = CSRFProtect()
csrf.init_app(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
babel = Babel(app)
'''
Localization
'''
@app.before_request
def set_locale():
lang = request.args.get("lang")
if lang and lang in ['zh', 'en']:
session["lang"] = lang
else:
session["lang"] = 'zh'
@babel.localeselector
def get_locale():
lang = session.get("lang")
if lang:
return lang
lang = "{}".format(request.accept_languages.best_match(
['zh-tw', 'zh', 'en'])).split("-", 1)[0]
return lang
#@cache.memoize(timeout=300)
def current_tzinfo():
tzinfo = None
tz_name = app.config.get("BABEL_DEFAULT_TIMEZONE")
if tz_name:
tzinfo = pytz.timezone(tz_name)
return tzinfo
'''
Jinja2 Template filters
'''
@app.template_filter()
def datetimefilter(value, format='%Y/%m/%d %H:%M %z'):
# 2017/01/03 23:38 +0800
if value:
# convert UTC to local time-zone
value = value.replace(tzinfo=pytz.utc).astimezone(current_tzinfo())
return value.strftime(format)
else:
return ""
@app.template_filter()
def currencyfilter(value):
# 1,234,567
return "{:,}".format(int(value))
@app.template_filter()
def coursetimefilter(value):
    # sample output: 週二 (1610-1730), i.e. Tuesday (1610-1730) under the zh_TW locale
locale.setlocale(locale.LC_ALL, "zh_TW.UTF-8")
result = u""
classes = ("" + value).split(",")
for c in classes:
w, t = c.split("(")
t = re.sub(r'[\(\)]', '', t)
result = result + \
u"{} ({})\n".format(calendar.day_name[
int(w) - 1].decode("UTF-8"), t)
return result.strip()
@app.template_filter()
def coursestatefilter(value):
states = {
0: _('Normal'),
1: _('Suspended'),
2: _('Test'),
}
return states[int(value)]
@app.template_filter()
def iconfilter(value):
maps = [
{'rule': r'(直排輪|溜冰)', 'icon': 'skating'},
{'rule': r'(鍵盤合奏)', 'icon': 'piano'},
{'rule': r'(跆拳道)', 'icon': 'taekwondo'},
{'rule': r'(珠心算)', 'icon': 'abacus'},
{'rule': r'(足球)', 'icon': 'football'},
{'rule': r'(淡彩|彩繪)', 'icon': 'artist'},
{'rule': r'(鉛筆)', 'icon': 'pencil'},
{'rule': r'(舞)', 'icon': 'dancer'},
{'rule': r'(羽球)', 'icon': 'shuttlecock'},
{'rule': r'(口琴)', 'icon': 'harmonica'},
{'rule': r'(射箭)', 'icon': 'target'},
{'rule': r'(鍵盤)', 'icon': 'karate'},
{'rule': r'(烏克麗麗)', 'icon': 'ukelele'},
{'rule': r'(籃球)', 'icon': 'basketball'},
{'rule': r'(竹笛)', 'icon': 'flute'},
{'rule': r'(扯鈴)', 'icon': 'diabolo'},
{'rule': r'(桌球)', 'icon': 'ping-pong'},
{'rule': r'(圍棋)', 'icon': 'chess-board'},
{'rule': r'(游泳)', 'icon': 'swimming-figure'},
{'rule': r'(書法|毛筆)', 'icon': 'chinese-paper-writing'},
{'rule': r'(素描)', 'icon': 'sketch'},
{'rule': r'(油畫)', 'icon': 'paint-brush'},
{'rule': r'(樹脂土)', 'icon': 'pottery-man'},
{'rule': r'(壘球|棒球)', 'icon': 'baseball'},
{'rule': r'(提琴)', 'icon': 'violin'},
{'rule': r'(功夫|武術)', 'icon': 'martial-arts'},
{'rule': r'(版畫|水墨)', 'icon': 'rgb'},
{'rule': r'(體適能)', 'icon': 'gymnastics'},
]
for m in maps:
if re.search(m['rule'], value.encode("UTF-8")):
return m['icon']
return 'brainstorm'
@app.template_filter()
def studenttagfilter(value):
tags = {
'N': u'一般',
'K': u'幼兒園',
'D': u'舞蹈班',
'H': u'口琴班舊生',
'P': u'鍵盤舊生',
'X': u'舞蹈班、鍵盤舊生',
'Y': u'口琴班舊生、鍵盤舊生',
'Z': u'舞蹈班、口琴班舊生',
'DX': u'舞蹈班',
'DZ': u'舞蹈班',
'XZ': u'舞蹈班',
'DXZ': u'舞蹈班',
'HY': u'口琴班舊生',
'HZ': u'口琴班舊生',
'YZ': u'口琴班舊生',
'HYZ': u'口琴班舊生',
'PX': u'鍵盤舊生',
'PY': u'鍵盤舊生',
'XY': u'鍵盤舊生',
'PXY': u'鍵盤舊生',
}
result = ""
for c in list(value):
if tags.has_key(c):
result += tags[c]
else:
result += c
return result
app.jinja_env.filters['datetimefilter'] = datetimefilter
app.jinja_env.filters['currencyfilter'] = currencyfilter
app.jinja_env.filters['coursetimefilter'] = coursetimefilter
app.jinja_env.filters['coursestatefilter'] = coursestatefilter
app.jinja_env.filters['iconfilter'] = iconfilter
app.jinja_env.filters['studenttagfilter'] = studenttagfilter
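# Illustrative usage note (not part of the original file): once registered
# above, these filters can be applied inside Jinja2 templates, for example
#   {{ course.created_at|datetimefilter }}
#   {{ course.price|currencyfilter }}
#   {{ course.time|coursetimefilter }}
# The template variables (course.*) are hypothetical placeholders.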
'''
Models
'''
from models import *
from views import *
| gpl-3.0 | -3,482,189,673,653,938,000 | 24.008772 | 75 | 0.559102 | false |
Ecogenomics/GtdbTk | gtdbtk/config/config.py | 1 | 12048 | import json
import os
import sys
"""
Load the reference package. This will fail if the directory doesn't exist.
"""
try:
GENERIC_PATH = os.environ['GTDBTK_DATA_PATH']
except KeyError:
print('\n' + '=' * 80)
print(' ERROR '.center(80))
print('_' * 80 + '\n')
print("The 'GTDBTK_DATA_PATH' environment variable is not defined.".center(80) + '\n')
print('Please set this variable to your reference data package.'.center(80))
print('https://github.com/Ecogenomics/GTDBTk#installation'.center(80))
print('=' * 80)
sys.exit(1)
"""
If the reference package sub-folders still exist in GTDBTK_DATA_PATH, then there
is no need to edit the variables below.
"""
MIN_REF_DATA_VERSION = 'r202'
MSA_FOLDER = os.path.join(GENERIC_PATH, "msa/")
MASK_DIR = os.path.join(GENERIC_PATH, "masks/")
PPLACER_DIR = os.path.join(GENERIC_PATH, "pplacer/")
FASTANI_DIR = os.path.join(GENERIC_PATH, "fastani/")
TAX_FOLDER = os.path.join(GENERIC_PATH, "taxonomy/")
RADII_DIR = os.path.join(GENERIC_PATH, "radii/")
METADATA_DIR = os.path.join(GENERIC_PATH, "metadata/")
RED_DIR = os.path.join(GENERIC_PATH, "mrca_red/")
MARKER_DIR = os.path.join(GENERIC_PATH, 'markers/')
TIGRFAM_HMMS = os.path.join(MARKER_DIR, 'tigrfam/tigrfam.hmm')
PFAM_HMM_DIR = os.path.join(MARKER_DIR, 'pfam/')
SPLIT_DIR = os.path.join(GENERIC_PATH, 'split')
HIGH_SPLIT_DIR = os.path.join(SPLIT_DIR, 'high')
LOW_SPLIT_DIR = os.path.join(SPLIT_DIR, 'low')
HIGH_PPLACER_DIR = os.path.join(HIGH_SPLIT_DIR, 'pplacer')
LOW_PPLACER_DIR = os.path.join(LOW_SPLIT_DIR, 'pplacer')
HIGH_RED_DIR = os.path.join(HIGH_SPLIT_DIR, 'red')
LOW_RED_DIR = os.path.join(LOW_SPLIT_DIR, 'red')
LOW_TREE_MAPPING_FILE = os.path.join(LOW_SPLIT_DIR, 'tree_mapping.tsv')
HIGH_PPLACER_REF_PKG = 'gtdbtk_package_high_level'
HIGH_RED_FILE = os.path.join(HIGH_RED_DIR, 'high_red_value.tsv')
LOW_PPLACER_REF_PKG = os.path.join(LOW_PPLACER_DIR, 'gtdbtk.package.{iter}.refpkg')
LOW_RED_FILE = os.path.join(LOW_RED_DIR, 'red_value_{iter}.tsv')
RED_DIST_BAC_DICT = ''
RED_DIST_ARC_DICT = ''
VERSION_DATA = ''
try:
with open(os.path.join(METADATA_DIR, "metadata.txt")) as metadataData:
for line in metadataData:
try:
line_infos = line.strip().split('=')
if line_infos[0] == 'RED_DIST_BAC_DICT':
RED_DIST_BAC_DICT = json.loads(line_infos[1])
elif line_infos[0] == 'RED_DIST_ARC_DICT':
RED_DIST_ARC_DICT = json.loads(line_infos[1])
elif line_infos[0] == 'VERSION_DATA':
VERSION_DATA = line_infos[1]
except ValueError:
print(f"Skipping invalid line {repr(line)}")
except IOError:
print('\n' + '=' * 80)
print(' ERROR '.center(80))
print('_' * 80 + '\n')
print('The GTDB-Tk reference data does not exist or is corrupted.'.center(80))
print(('GTDBTK_DATA_PATH=%s' % GENERIC_PATH).center(80) + '\n')
print('Please compare the checksum to those provided in the download repository.'.center(80))
print('https://github.com/Ecogenomics/GTDBTk#gtdb-tk-reference-data'.center(80))
print('=' * 80)
sys.exit(1)
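# Illustrative sketch of the expected metadata.txt layout (an assumption, not
# copied from the reference package): each line is KEY=VALUE, and the two
# RED_DIST_* values are JSON-encoded dictionaries, e.g.
#   VERSION_DATA=r202
#   RED_DIST_BAC_DICT={"p__Proteobacteria": 0.32, "c__Gammaproteobacteria": 0.46}
# Lines whose value fails to parse raise ValueError and are skipped by the loop above.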
# Relative Evolution Distance
RED_INTERVAL = 0.1
RED_MIN_SUPPORT = 0.0
RED_MIN_CHILDREN = 2
# Marker information
BAC120_MARKERS = {"PFAM": ["PF00380.20.hmm", "PF00410.20.hmm", "PF00466.21.hmm",
"PF01025.20.hmm", "PF02576.18.hmm", "PF03726.15.hmm"],
"TIGRFAM": ["TIGR00006.HMM", "TIGR00019.HMM", "TIGR00020.HMM",
"TIGR00029.HMM", "TIGR00043.HMM", "TIGR00054.HMM",
"TIGR00059.HMM", "TIGR00061.HMM", "TIGR00064.HMM",
"TIGR00065.HMM", "TIGR00082.HMM", "TIGR00083.HMM",
"TIGR00084.HMM", "TIGR00086.HMM", "TIGR00088.HMM",
"TIGR00090.HMM", "TIGR00092.HMM", "TIGR00095.HMM",
"TIGR00115.HMM", "TIGR00116.HMM", "TIGR00138.HMM",
"TIGR00158.HMM", "TIGR00166.HMM", "TIGR00168.HMM",
"TIGR00186.HMM", "TIGR00194.HMM", "TIGR00250.HMM",
"TIGR00337.HMM", "TIGR00344.HMM", "TIGR00362.HMM",
"TIGR00382.HMM", "TIGR00392.HMM", "TIGR00396.HMM",
"TIGR00398.HMM", "TIGR00414.HMM", "TIGR00416.HMM",
"TIGR00420.HMM", "TIGR00431.HMM", "TIGR00435.HMM",
"TIGR00436.HMM", "TIGR00442.HMM", "TIGR00445.HMM",
"TIGR00456.HMM", "TIGR00459.HMM", "TIGR00460.HMM",
"TIGR00468.HMM", "TIGR00472.HMM", "TIGR00487.HMM",
"TIGR00496.HMM", "TIGR00539.HMM", "TIGR00580.HMM",
"TIGR00593.HMM", "TIGR00615.HMM", "TIGR00631.HMM",
"TIGR00634.HMM", "TIGR00635.HMM", "TIGR00643.HMM",
"TIGR00663.HMM", "TIGR00717.HMM", "TIGR00755.HMM",
"TIGR00810.HMM", "TIGR00922.HMM", "TIGR00928.HMM",
"TIGR00959.HMM", "TIGR00963.HMM", "TIGR00964.HMM",
"TIGR00967.HMM", "TIGR01009.HMM", "TIGR01011.HMM",
"TIGR01017.HMM", "TIGR01021.HMM", "TIGR01029.HMM",
"TIGR01032.HMM", "TIGR01039.HMM", "TIGR01044.HMM",
"TIGR01059.HMM", "TIGR01063.HMM", "TIGR01066.HMM",
"TIGR01071.HMM", "TIGR01079.HMM", "TIGR01082.HMM",
"TIGR01087.HMM", "TIGR01128.HMM", "TIGR01146.HMM",
"TIGR01164.HMM", "TIGR01169.HMM", "TIGR01171.HMM",
"TIGR01302.HMM", "TIGR01391.HMM", "TIGR01393.HMM",
"TIGR01394.HMM", "TIGR01510.HMM", "TIGR01632.HMM",
"TIGR01951.HMM", "TIGR01953.HMM", "TIGR02012.HMM",
"TIGR02013.HMM", "TIGR02027.HMM", "TIGR02075.HMM",
"TIGR02191.HMM", "TIGR02273.HMM", "TIGR02350.HMM",
"TIGR02386.HMM", "TIGR02397.HMM", "TIGR02432.HMM",
"TIGR02729.HMM", "TIGR03263.HMM", "TIGR03594.HMM",
"TIGR03625.HMM", "TIGR03632.HMM", "TIGR03654.HMM",
"TIGR03723.HMM", "TIGR03725.HMM", "TIGR03953.HMM"]}
AR122_MARKERS = {"PFAM": ["PF01868.17.hmm", "PF01282.20.hmm", "PF01655.19.hmm",
"PF01092.20.hmm", "PF01000.27.hmm", "PF00368.19.hmm",
"PF00827.18.hmm", "PF01269.18.hmm", "PF00466.21.hmm",
"PF01015.19.hmm", "PF13685.7.hmm", "PF02978.20.hmm",
"PF04919.13.hmm", "PF01984.21.hmm", "PF04104.15.hmm",
"PF00410.20.hmm", "PF01798.19.hmm", "PF01864.18.hmm",
"PF01990.18.hmm", "PF07541.13.hmm", "PF04019.13.hmm",
"PF00900.21.hmm", "PF01090.20.hmm", "PF02006.17.hmm",
"PF01157.19.hmm", "PF01191.20.hmm", "PF01866.18.hmm",
"PF01198.20.hmm", "PF01496.20.hmm", "PF00687.22.hmm",
"PF03874.17.hmm", "PF01194.18.hmm", "PF01200.19.hmm",
"PF13656.7.hmm", "PF01280.21.hmm"],
"TIGRFAM": ["TIGR00468.HMM", "TIGR01060.HMM", "TIGR03627.HMM",
"TIGR01020.HMM", "TIGR02258.HMM", "TIGR00293.HMM",
"TIGR00389.HMM", "TIGR01012.HMM", "TIGR00490.HMM",
"TIGR03677.HMM", "TIGR03636.HMM", "TIGR03722.HMM",
"TIGR00458.HMM", "TIGR00291.HMM", "TIGR00670.HMM",
"TIGR00064.HMM", "TIGR03629.HMM", "TIGR00021.HMM",
"TIGR03672.HMM", "TIGR00111.HMM", "TIGR03684.HMM",
"TIGR01077.HMM", "TIGR01213.HMM", "TIGR01080.HMM",
"TIGR00501.HMM", "TIGR00729.HMM", "TIGR01038.HMM",
"TIGR00270.HMM", "TIGR03628.HMM", "TIGR01028.HMM",
"TIGR00521.HMM", "TIGR03671.HMM", "TIGR00240.HMM",
"TIGR02390.HMM", "TIGR02338.HMM", "TIGR00037.HMM",
"TIGR02076.HMM", "TIGR00335.HMM", "TIGR01025.HMM",
"TIGR00471.HMM", "TIGR00336.HMM", "TIGR00522.HMM",
"TIGR02153.HMM", "TIGR02651.HMM", "TIGR03674.HMM",
"TIGR00323.HMM", "TIGR00134.HMM", "TIGR02236.HMM",
"TIGR03683.HMM", "TIGR00491.HMM", "TIGR00658.HMM",
"TIGR03680.HMM", "TIGR00392.HMM", "TIGR00422.HMM",
"TIGR00279.HMM", "TIGR01052.HMM", "TIGR00442.HMM",
"TIGR00308.HMM", "TIGR00398.HMM", "TIGR00456.HMM",
"TIGR00549.HMM", "TIGR00408.HMM", "TIGR00432.HMM",
"TIGR00264.HMM", "TIGR00982.HMM", "TIGR00324.HMM",
"TIGR01952.HMM", "TIGR03626.HMM", "TIGR03670.HMM",
"TIGR00337.HMM", "TIGR01046.HMM", "TIGR01018.HMM",
"TIGR00936.HMM", "TIGR00463.HMM", "TIGR01309.HMM",
"TIGR03653.HMM", "TIGR00042.HMM", "TIGR02389.HMM",
"TIGR00307.HMM", "TIGR03673.HMM", "TIGR00373.HMM",
"TIGR01008.HMM", "TIGR00283.HMM", "TIGR00425.HMM",
"TIGR00405.HMM", "TIGR03665.HMM", "TIGR00448.HMM"]}
# Information for Multiple hits markers:
DEFAULT_MULTIHIT_THRESHOLD = 10.0
# Information for aligning genomes
DEFAULT_DOMAIN_THRESHOLD = 10.0
AR_MARKER_COUNT = 122
BAC_MARKER_COUNT = 120
# Information about alignment Fraction to resolve fastANI results
AF_THRESHOLD = 0.65
# MSA file names
CONCAT_BAC120 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_bac120.faa")
CONCAT_AR122 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_ar122.faa")
# Taxonomy file name
TAXONOMY_FILE = os.path.join(TAX_FOLDER, "gtdb_taxonomy.tsv")
# Type Strain radii file
RADII_FILE = os.path.join(RADII_DIR, "gtdb_radii.tsv")
# Mask file names
MASK_BAC120 = f"gtdb_{VERSION_DATA}_bac120.mask"
MASK_AR122 = f"gtdb_{VERSION_DATA}_ar122.mask"
MASK_RPS23 = f"gtdb_{VERSION_DATA}_rps23.mask"
# Pplacer configuration
PPLACER_BAC120_REF_PKG = f"gtdb_{VERSION_DATA}_bac120.refpkg"
PPLACER_AR122_REF_PKG = f"gtdb_{VERSION_DATA}_ar122.refpkg"
PPLACER_RPS23_REF_PKG = f"gtdb_{VERSION_DATA}_rps23.refpkg"
PPLACER_MIN_RAM_BAC = 204
PPLACER_MIN_RAM_ARC = 13
# Fastani configuration
FASTANI_SPECIES_THRESHOLD = 95.0
FASTANI_GENOMES = os.path.join(FASTANI_DIR, "database/")
FASTANI_GENOME_LIST = os.path.join(FASTANI_DIR, "genome_paths.tsv")
FASTANI_GENOMES_EXT = "_genomic.fna.gz"
# MRCA RED VALUE
MRCA_RED_BAC120 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_bac120.tsv")
MRCA_RED_AR122 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_ar122.tsv")
# Hashing information for validating the reference package.
REF_HASHES = {PPLACER_DIR: '4d931b5109a240602f55228029b87ee768da8141',
MASK_DIR: '36d6ac371d247b2b952523b9798e78908ea323fa',
MARKER_DIR: '2ba5ae35fb272462663651d18fd9e523317e48cd',
RADII_DIR: '9f9a2e21e27b9049044d04d731795499414a365c',
MSA_FOLDER: 'b426865245c39ee9f01b0392fb8f7867a9f76f0a',
METADATA_DIR: '7640aed96fdb13707a2b79b746a94335faabd6df',
TAX_FOLDER: '4a7a1e4047c088e92dee9740206499cdb7e5beca',
FASTANI_DIR: '70439cf088d0fa0fdbb4f47b4a6b47e199912139',
RED_DIR: 'ad6a184150e7b6e58547912660a17999fadcfbff'}
# Config values for checking GTDB-Tk on startup.
GTDBTK_VER_CHECK = True
GTDBTK_VER_TIMEOUT = 3 # seconds
# Internal settings used for logging.
LOG_TASK = 21
| gpl-3.0 | 698,190,801,838,647,900 | 51.842105 | 97 | 0.560508 | false |
m3talstorm/foe-bot | foe/models/resources.py | 1 | 2337 |
"""
"""
# Native
import time
import pprint
import json
from collections import OrderedDict
# 3rd-Party
from sqlalchemy import Table, Column, ForeignKey, Integer, String, Boolean, Float
from sqlalchemy.orm import relationship, backref
import pydash
#
from request import Request
from models.model import Model
class Resources(Model):
"""
"""
REQUEST_CLASS = "ResourceService"
__tablename__ = 'resources'
# Attributes
# ---------------------------------------------------------
money = Column(Integer, default=0)
supplies = Column(Integer, default=0)
granite = Column(Integer, default=0)
carnival_roses = Column(Integer, default=0)
stars = Column(Integer, default=0)
cloth = Column(Integer, default=0)
honey = Column(Integer, default=0)
lead = Column(Integer, default=0)
population = Column(Integer, default=0)
gems = Column(Integer, default=0)
sandstone = Column(Integer, default=0)
wine = Column(Integer, default=0)
guild_expedition_attempt = Column(Integer, default=0)
medals = Column(Integer, default=0)
alabaster = Column(Integer, default=0)
dye = Column(Integer, default=0)
cypress = Column(Integer, default=0)
ebony = Column(Integer, default=0)
limestone = Column(Integer, default=0)
negotiation_game_turn = Column(Integer, default=0)
expansions = Column(Integer, default=0)
summer_tickets = Column(Integer, default=0)
spring_lanterns = Column(Integer, default=0)
tavern_silver = Column(Integer, default=0)
premium = Column(Integer, default=0)
raw_cypress = Column(Integer, default=0)
raw_dye = Column(Integer, default=0)
raw_cloth = Column(Integer, default=0)
raw_ebony = Column(Integer, default=0)
raw_granite = Column(Integer, default=0)
# Back-refs
# ---------------------------------------------------------
account_id = Column(Integer, ForeignKey('account.player_id'), primary_key=True)
def __init__(self, *args, **kwargs):
"""
"""
return super(Resources, self).__init__(*args, **kwargs)
def __repr__(self):
"""
"""
return "Resources"
def populate(self, *args, **kwargs):
"""
"""
return super(Resources, self).populate(*args, **kwargs)
| mit | -8,472,919,973,206,751,000 | 19.146552 | 83 | 0.611896 | false |
SectorLabs/django-postgres-extra | psqlextra/models/partitioned.py | 1 | 1374 | from django.db.models.base import ModelBase
from psqlextra.types import PostgresPartitioningMethod
from .base import PostgresModel
from .options import PostgresPartitionedModelOptions
class PostgresPartitionedModelMeta(ModelBase):
"""Custom meta class for :see:PostgresPartitionedModel.
This meta class extracts attributes from the inner
`PartitioningMeta` class and copies it onto a `_partitioning_meta`
attribute. This is similar to how Django's `_meta` works.
"""
default_method = PostgresPartitioningMethod.RANGE
default_key = []
def __new__(cls, name, bases, attrs, **kwargs):
new_class = super().__new__(cls, name, bases, attrs, **kwargs)
meta_class = attrs.pop("PartitioningMeta", None)
method = getattr(meta_class, "method", None)
key = getattr(meta_class, "key", None)
        partitioning_meta = PostgresPartitionedModelOptions(
            method=method or cls.default_method, key=key or cls.default_key
        )
        new_class.add_to_class("_partitioning_meta", partitioning_meta)
return new_class
class PostgresPartitionedModel(
PostgresModel, metaclass=PostgresPartitionedModelMeta
):
"""Base class for taking advantage of PostgreSQL's 11.x native support for
table partitioning."""
class Meta:
abstract = True
base_manager_name = "objects"
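# Minimal usage sketch (illustrative only, not part of this module): a concrete
# model declares its partitioning scheme via the inner PartitioningMeta class,
# which the metaclass above copies onto `_partitioning_meta`. The model and
# field names below are hypothetical.
#
#   from django.db import models
#   from psqlextra.types import PostgresPartitioningMethod
#
#   class Measurement(PostgresPartitionedModel):
#       class PartitioningMeta:
#           method = PostgresPartitioningMethod.RANGE
#           key = ["timestamp"]
#
#       timestamp = models.DateTimeField()
#       value = models.FloatField()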
| mit | -2,785,277,113,692,309,000 | 30.953488 | 78 | 0.700873 | false |
vially/googlemusic-xbmc | resources/Lib/navigation.py | 1 | 31743 | import time
from urllib import quote_plus, urlencode
import api
import utils
import xbmc
import xbmcplugin
from xbmcgui import ListItem
fanart = utils.addon.getAddonInfo('fanart')
class Navigation:
def __init__(self):
self.lang = utils.addon.getLocalizedString
self.api = api.Api()
self.contextmenu_action = "XBMC.RunPlugin("+utils.addon_url+"?action=%s&%s)"
self.main_menu = (
{'title': self.lang(30224), 'params': {'path': "home_menu"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30219), 'params': {'path': "listennow"}, 'user': ['subscriber', 'free']},
{'title': self.lang(30220), 'params': {'path': "topcharts"}, 'user': ['subscriber']},
{'title': self.lang(30221), 'params': {'path': "newreleases"}, 'user': ['subscriber']},
{'title': self.lang(30209), 'params': {'path': "library"}, 'user': ['library']},
{'title': self.lang(30202), 'params': {'path': "playlists_menu"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30222), 'params': {'path': "browse_stations"}, 'user': ['subscriber', 'free']},
{'title': self.lang(30208), 'params': {'path': "search"}, 'user': ['library', 'subscriber']}
)
self.lib_menu = (
{'title': self.lang(30203), 'params': {'path': "playlists", 'type': "radio"}},
{'title': self.lang(30210), 'params': {'path': "playlist", 'playlist_id': "feellucky"}},
{'title': self.lang(30214), 'params': {'path': "playlist", 'playlist_id': "shuffled_albums"}},
{'title': self.lang(30201), 'params': {'path': "playlist", 'playlist_id': "all_songs"}},
{'title': self.lang(30205), 'params': {'path': "filter", 'criteria': "artist"}},
{'title': self.lang(30206), 'params': {'path': "filter", 'criteria': "album"}},
{'title': self.lang(30207), 'params': {'path': "filter", 'criteria': "genre"}},
{'title': self.lang(30212), 'params': {'path': "filter", 'criteria': "composer"}},
)
self.playlists_menu = (
{'title': self.lang(30225), 'params': {'path': "playlists", 'type': "recent"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30204), 'params': {'path': "playlists", 'type': "auto"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30202), 'params': {'path': "playlists", 'type': "user"}, 'user': ['library', 'subscriber']},
)
self.home_menu = (
{'title': self.lang(30211), 'params': {'path': "ifl"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30225), 'params': {'path': "home_recents"}, 'user': ['library', 'subscriber']},
)
def listMenu(self, params):
get = params.get
path = get("path", "root")
utils.log("PATH: " + path)
listItems = []
content = ''
sortMethods = [xbmcplugin.SORT_METHOD_UNSORTED]
if path == "root":
# assemble menu depending on user info
subscriber = utils.addon.getSettingBool('subscriber')
library = utils.addon.getSettingInt('fetched_count') > 0
utils.log("Assembling menu for subscriber=%r and library=%r" % (subscriber, library))
for item in self.main_menu:
user = item.pop('user')
if (subscriber and 'subscriber' in user) or \
(library and 'library' in user) or \
(not subscriber and 'free' in user):
listItems.append(item)
listItems = self.getMenuItems(listItems)
elif path == "ifl":
listItems = self.addSongsFromLibrary(self.api.getStationTracks("IFL"), 'library')
content = "songs"
elif path == "home_menu":
listItems = self.getMenuItems(self.home_menu)
listItems.extend(self.get_situations())
content = "albums"
elif path == "situation_items":
listItems = self.get_situations_items(get('situation_id'))
content = "albums"
elif path == "library":
listItems = self.getMenuItems(self.lib_menu)
elif path == "playlists_menu":
listItems = self.getMenuItems(self.playlists_menu)
elif path == "playlist":
listItems = self.listPlaylistSongs(get("playlist_id"))
if get("playlist_id") == 'all_songs':
sortMethods = [xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE]
content = "songs"
elif path == "station":
listItems = self.addSongsFromLibrary(self.api.getStationTracks(get('id')), 'library')
content = "songs"
elif path == "playlists":
listItems = self.getPlaylists(get('type'))
elif path == "filter" and 'album' == get('criteria'):
listItems = self.listAlbums(get('criteria'))
sortMethods = [xbmcplugin.SORT_METHOD_ALBUM_IGNORE_THE, xbmcplugin.SORT_METHOD_VIDEO_YEAR,
xbmcplugin.SORT_METHOD_ARTIST, xbmcplugin.SORT_METHOD_ALBUM, xbmcplugin.SORT_METHOD_DATE]
content = "albums"
elif path in ["artist", "genre"] and get('name'):
album_name = get('name')
paramsAllSongs = {'path': "allcriteriasongs", 'criteria': path, 'name': album_name}
listItems.insert(0, self.createFolder('* ' + self.lang(30201), paramsAllSongs))
listItems.extend(self.listAlbums(path, album_name))
sortMethods = [xbmcplugin.SORT_METHOD_ALBUM_IGNORE_THE, xbmcplugin.SORT_METHOD_VIDEO_YEAR,
xbmcplugin.SORT_METHOD_ARTIST, xbmcplugin.SORT_METHOD_ALBUM, xbmcplugin.SORT_METHOD_DATE]
content = "albums"
elif path == "filter":
listItems = self.getCriteria(get('criteria'))
sortMethods = [xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE]
elif path == "allcriteriasongs":
listItems = self.listAllCriteriaSongs(get('criteria'), get('name'))
sortMethods = [xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE]
content = "songs"
elif path in ["genre", "artist", "album", "composer"]:
songs = self.api.getFilterSongs(path, get('album'), get('artist', ''))
listItems = self.addSongsFromLibrary(songs, 'library')
sortMethods = [xbmcplugin.SORT_METHOD_TRACKNUM, xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE,
xbmcplugin.SORT_METHOD_PLAYCOUNT, xbmcplugin.SORT_METHOD_SONG_RATING]
content = "songs"
elif path == "search":
listItems.append(self.createFolder(self.lang(30223), {'path': 'search_new'}))
history = utils.addon.getSetting('search-history').split('|')
for item in history:
if item:
listItems.append(self.createFolder(item, {'path': 'search_query', 'query': item}))
elif path == "search_new":
keyboard = xbmc.Keyboard('', self.lang(30208))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
listItems = self.getSearch(keyboard.getText())
history = utils.addon.getSetting('search-history')
history = keyboard.getText() + ('|' + history if history else '')
if len(history.split('|')) > 10:
history = '|'.join(history.split('|')[0:-1])
utils.addon.setSetting('search-history', history)
content = "songs"
else:
return
elif path == "search_query":
listItems = self.getSearch(get("query"))
content = "songs"
elif path == "search_result":
utils.log("SEARCH_RESULT: " + get('query'))
listItems = self.getSearch(params)
content = "songs"
elif path == "listennow":
listItems = self.getListennow(self.api.getApi().get_listen_now_items())
content = "albums"
elif path == "topcharts":
listItems.append(self.createFolder(self.lang(30206), {'path': 'topcharts_albums'}))
listItems.append(self.createFolder(self.lang(30213), {'path': 'topcharts_songs'}))
elif path == "topcharts_songs":
listItems = self.addSongsFromLibrary(self.api.getTopcharts(), 'library')
content = "songs"
elif path == "topcharts_albums":
listItems = self.createAlbumFolder(self.api.getTopcharts(content_type='albums'))
content = "albums"
elif path == "newreleases":
listItems = self.createAlbumFolder(self.api.getNewreleases())
content = "albums"
elif path == "browse_stations":
listItems = self.browseStations(get('category'))
elif path == "get_stations":
listItems = self.getCategoryStations(self.api.getApi().get_stations(get('subcategory')))
elif path == "create_station":
if not utils.addon.getSettingBool('subscriber'):
xbmc.executebuiltin("XBMC.Notification(%s,%s,5000,%s)" % (
utils.plugin, utils.tryEncode("Song skipping is limited!"), utils.addon.getAddonInfo('icon')))
tracks = self.api.startRadio(get('name'), artist_id=get('artistid'), genre_id=get('genreid'),
curated_station_id=get('curatedid'), track_id=get('trackid'))
listItems = self.addSongsFromLibrary(tracks, 'library')
content = "songs"
# utils.playAll(tracks)
# utils.setResolvedUrl(listItems[0][1])
elif path == "genres":
listItems = self.getGenres(self.api.getApi().get_top_chart_genres())
elif path == "store_album":
utils.log("ALBUM: " + get('album_id'))
listItems = self.addSongsFromLibrary(self.api.getAlbum(get('album_id')), 'library')
content = "songs"
elif path == "artist_topsongs":
listItems = self.addSongsFromLibrary(self.api.getArtistInfo(get('artistid'))['tracks'], 'library')
content = "songs"
elif path == "related_artists":
listItems = []
items = self.api.getArtistInfo(get('artistid'), False, 0, relartists=10)['relartists']
for item in items:
params = {'path': 'artist_topsongs', 'artistid': item['artistId']}
listItems.append(self.createFolder(item['name'], params, arturl=item['artistArtRef']))
elif path == "home_recents":
listItems = self.get_recents()
            content = "albums"
else:
utils.log("Invalid path: " + get("path"))
return
utils.setDirectory(listItems, content, sortMethods)
def getMenuItems(self, items):
menuItems = []
for menu_item in items:
params = menu_item['params']
cm = []
if 'playlist_id' in params:
cm = self.getPlayAllContextMenu(menu_item['title'], params['playlist_id'])
elif 'type' in params:
cm.append(self.create_menu(30304, "update_playlists", {'playlist_type': params['type']}))
cm.append(self.create_menu(30306, "add_favourite", {'path': 'playlists', 'playlist_type': params['type'], 'title': menu_item['title']}))
cm.append(self.create_menu(30316, "create_playlist"))
elif params['path'] == 'library':
cm.append(self.create_menu(30305, "update_library"))
cm.append(self.create_menu(30306, "add_favourite", {'path': 'library', 'title': menu_item['title']}))
elif 'criteria' in params:
cm.append(self.create_menu(30306, "add_favourite", {'path': 'filter', 'criteria': params['criteria'], 'title': menu_item['title']}))
menuItems.append(self.createFolder(menu_item['title'], params, cm))
return menuItems
def listPlaylistSongs(self, playlist_id):
utils.log("Loading playlist: " + playlist_id)
songs = self.api.getPlaylistSongs(playlist_id)
if playlist_id == 'videos':
return self.addVideosFromLibrary(songs)
if playlist_id in ('thumbsup', 'lastadded', 'mostplayed', 'freepurchased', 'feellucky', 'all_songs', 'shuffled_albums'):
return self.addSongsFromLibrary(songs, 'library')
return self.addSongsFromLibrary(songs, 'playlist' + playlist_id)
def addVideosFromLibrary(self, library):
listItems = []
for song in library:
li = ListItem(song['display_name'], offscreen=True)
li.setArt({'thumb': song['albumart'], 'fanart': song['artistart']})
li.setProperties({'IsPlayable': 'true', 'Video': 'true'})
listItems.append(["plugin://plugin.video.youtube/play/?video_id=%s" % song['videoid'], li])
return listItems
def addSongsFromLibrary(self, library, song_type):
return [[utils.getUrl(song), self.createItem(song, song_type)] for song in library]
def listAllCriteriaSongs(self, filter_type, filter_criteria):
songs = self.api.getFilterSongs(filter_type, filter_criteria, '')
listItems = []
# add album name when showing all artist songs
for song in songs:
songItem = self.createItem(song, 'library')
songItem.setLabel("".join(['[', song['album'], '] ', song['title']]))
songItem.setLabel2(song['album'])
listItems.append([utils.getUrl(song), songItem])
return listItems
def createItem(self, song, song_type):
infoLabels = {
'tracknumber': song['tracknumber'], 'duration': song['duration'], 'year': song['year'],
'genre': song['genre'], 'album': song['album'], 'artist': song['artist'], 'title': song['title'],
'playcount': song['playcount'], 'rating': song['rating'], 'discnumber': song['discnumber'],
'mediatype': 'song'
}
li = utils.createItem(song['display_name'], song['albumart'], song['artistart'])
li.setInfo(type='Music', infoLabels=infoLabels)
li.addContextMenuItems(self.getSongContextMenu(song['song_id'], song['display_name'], song_type))
return li
def getPlaylists(self, playlist_type):
utils.log("Getting playlists of type: " + playlist_type)
listItems = []
append = listItems.append
addFolder = self.createFolder
if playlist_type == 'radio':
for rs in self.api.getStations():
# utils.log(repr(rs))
image = rs['compositeArtRefs'][0]['url'] if 'compositeArtRefs' in rs else rs['imageUrls'][0]['url'] if 'imageUrls' in rs else None
cm = self.getRadioContextMenu(rs['name'], rs['id'])
append(addFolder(rs['name'], {'path': "station", 'id': rs['id']}, cm, image))
elif playlist_type == 'auto':
auto = [['thumbsup', self.lang(30215)], ['lastadded', self.lang(30216)],
['freepurchased', self.lang(30217)], ['mostplayed', self.lang(30218)],
['videos', 'Videos']]
for pl_id, pl_name in auto:
cm = self.getPlayAllContextMenu(pl_name, pl_id)
append(addFolder(pl_name, {'path': "playlist", 'playlist_id': pl_id}, cm))
else:
for pl_id, pl_name, pl_arturl, pl_token, pl_recent in self.api.getPlaylistsByType(playlist_type):
cm = self.getPlayAllContextMenu(pl_name, pl_id, pl_token)
append(addFolder(pl_name, {'path': "playlist", 'playlist_id': pl_id}, cm, pl_arturl))
return listItems
def listAlbums(self, criteria, name=''):
utils.log("LIST ALBUMS: " + repr(criteria) + " " + repr(name))
listItems = []
getCm = self.getFilterContextMenu
items = self.api.getCriteria(criteria, name)
for item in items:
# utils.log(repr(item))
album = item['album']
artist = item['album_artist']
params = {'path': criteria, 'album': album, 'artist': artist}
folder = self.createFolder(album, params, getCm(criteria, album, artist), item['arturl'], artist, item['artistart'])
folder[1].setInfo(type='Music', infoLabels={'year': item['year'], 'artist': artist, 'album': album,
'date': time.strftime('%d.%m.%Y', time.gmtime(item['date'] / 1000000)), 'mediatype': 'album'})
listItems.append(folder)
return listItems
def getCriteria(self, criteria):
utils.log("CRITERIA: " + repr(criteria))
folder = self.createFolder
getCm = self.getFilterContextMenu
items = self.api.getCriteria(criteria)
if criteria in ('artist', 'genre'):
return [folder(item['criteria'], {'path': criteria, 'name': item['criteria']},
getCm(criteria, item['criteria']), item['arturl'], fanarturl=item['arturl']) for item in items]
else:
return [folder(item['criteria'], {'path': criteria, 'album': item['criteria']},
getCm(criteria, item['criteria'])) for item in items]
def get_recents(self):
listItems = []
dictItems = {}
addFolder = self.createFolder
for pl_id, pl_name, pl_arturl, pl_token, pl_recent in self.api.getPlaylistsByType('user'):
cm = self.getPlayAllContextMenu(pl_name, pl_id, pl_token)
dictItems[int(pl_recent)] = addFolder(pl_name+" (Playlist)", {'path': 'playlist', 'playlist_id': pl_id}, cm, pl_arturl)
from datetime import datetime, timedelta
filtertime = ((datetime.today() - timedelta(40)) - datetime(1970,1,1)).total_seconds() * 1000000
for rs in self.api.getStations():
if int(rs['lastModifiedTimestamp']) < filtertime:
continue
image = rs['compositeArtRefs'][0]['url'] if 'compositeArtRefs' in rs else rs['imageUrls'][0]['url'] if 'imageUrls' in rs else None
cm = self.getRadioContextMenu(rs['name'], rs['id'])
if rs['seed']['seedType'] == '3':
rs['name'] = rs['name'] + " Radio"
dictItems[int(rs['recentTimestamp'])] = addFolder(rs['name'], {'path': 'station', 'id': rs['id']}, cm, image)
#for song in self.api.getRecent():
# cm = self.getFilterContextMenu("album", song['album'], song['artist'])
# dictItems[song['recent']] = addFolder(song['album'], {'path': 'album', 'album': song['album'], 'artist': song['artist']}, cm, song['albumart'])
for key in sorted(dictItems.keys(), reverse=True):
#utils.log("RECENTS: "+str(key)+" "+repr(dictItems[key][1].getLabel()))
listItems.append(dictItems[key])
return listItems
def getListennow(self, items):
listItems = []
for item in items:
suggestion = item.get('suggestion_text')
image = item.get('images', [{'url': ''}])[0]['url']
# defualt to radio station
item_type = item.get('type', '3')
if item['type'] == '1':
album = item['album']
listItems.extend(self.createAlbumFolder([{
'name': album['title'] + ' (' + suggestion + ')',
'artist': album['artist_name'],
'albumArtRef': image,
'albumId': album['id']['metajamCompactKey']}]))
elif item['type'] == '3':
radio = item['radio_station']
params = {'path': 'create_station',
'name': utils.tryEncode('Radio %s (%s)' % (radio['title'], suggestion))}
params.update(self.getStationSeed(radio['id']['seeds'][0]))
listItems.append(self.createFolder(params['name'], params, arturl=image))
else:
utils.log("ERROR item type unknown " + repr(item['type']))
return listItems
def get_situations(self):
listItems = []
items = self.api.get_situations()
for item in items:
params = {'path': 'situation_items', 'situation_id': item['id']}
listItems.append(self.createFolder(item['title'], params, arturl=item.get('imageUrl'), fanarturl=item.get('wideImageUrl')))
return listItems
def get_situations_items(self, situation_id):
listItems = []
items = self.api.get_situations()
for item in items:
if item['id'] == situation_id:
##return self.getListennow(item['stations'])
return self.getCategoryStations(item['stations'])
utils.log("ERROR Situation not found: "+situation_id)
return None
def browseStations(self, index=None):
listItems = []
items = self.api.getStationsCategories()
utils.log("INDEX:"+repr(index)+"\n"+repr(items))
if index:
# list subcategories from category index
items = items[int(index)].get('subcategories')
for item in items:
# populate with categories or subcategories
if 'subcategories' in item:
params = {'path': 'browse_stations'}
else:
params = {'path': 'get_stations'}
params['category'] = items.index(item)
params['subcategory'] = item['id']
listItems.append(self.createFolder(item['display_name'], params))
return listItems
def getCategoryStations(self, items):
listItems = []
utils.log("STATIONS: "+repr(items))
for item in items:
#utils.log("STATION: "+repr(item))
params = {'path': 'create_station', 'name': utils.tryEncode(item['name'])}
params.update(self.getStationSeed(item['seed']))
url1 = item['compositeArtRefs'][0]['url'] if 'compositeArtRefs' in item else ''
url2 = item['imageUrls'][0]['url']
folder = self.createFolder(item['name'], params, arturl=url1, name2=item.get('description'), fanarturl=url2)
folder[1].setInfo(type='Music', infoLabels={'comment': item.get('description', 'No description'),
'date': time.strftime('%d.%m.%Y', time.gmtime(item.get('recentTimestamp', 0) / 1000000))})
listItems.append(folder)
return listItems
def getStationSeed(self, seed):
seed_id = {}
if seed['seedType'] == '3':
seed_id['artistid'] = seed['artistId']
elif seed['seedType'] == '5':
seed_id['genreid'] = seed['genreId']
elif seed['seedType'] == '2':
seed_id['trackid'] = seed['trackId']
elif seed['seedType'] == '9':
seed_id['curatedid'] = seed['curatedStationId']
else:
utils.log("ERROR seedtype unknown " + repr(seed['seedType']))
return seed_id
def createAlbumFolder(self, items):
listItems = []
for item in items:
params = {'path': 'store_album', 'album_id': item['albumId']}
cm = [self.create_menu(30301, "play_all", params),
self.create_menu(30309, "add_album_library", params),
self.create_menu(30315, "add_to_queue", params)]
folder = self.createFolder("[%s] %s" % (item['artist'], item['name']), params, cm, item.get('albumArtRef', ''),
item.get('description'), fanarturl=item.get('artistArtRef', ''))
folder[1].setInfo(type='Music', infoLabels={'comment': item.get('description', 'No description'),
'artist': item['artist'], 'album': item['name'], 'mediatype': 'album'})
listItems.append(folder)
# print repr(items)
return listItems
def createFolder(self, name, params, contextMenu=[], arturl='', name2='*', fanarturl=fanart):
li = ListItem(label=name, label2=name2, offscreen=True)
li.setArt({'thumb': arturl, 'fanart': fanarturl})
li.addContextMenuItems(contextMenu)
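        # The triple below is (url, ListItem, isFolder); it is presumably handed
        # to xbmcplugin.addDirectoryItems by utils.setDirectory, and Kodi accepts
        # the string "true" as a truthy isFolder flag.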
return "?".join([utils.addon_url, urlencode(params, doseq=True)]), li, "true"
def getSongContextMenu(self, song_id, display_name, song_type):
params = {'song_id': song_id, 'display_name': display_name}
cm = []
if song_id.startswith('T'):
cm.append(self.create_menu(30309, "add_library", params))
cm.append(self.create_menu(30319, "artist_topsongs", params))
cm.append(self.create_menu(30320, "related_artists", params))
if song_type == 'library':
cm.append(self.create_menu(30307, "add_playlist", params))
elif song_type.startswith('playlist'):
playlist = {'song_id': song_id, 'display_name': display_name, 'playlist_id': song_type[8:]}
cm.append(self.create_menu(30322, "play_all", playlist))
cm.append(self.create_menu(30308, "del_from_playlist", playlist))
cm.append(self.create_menu(30409, "set_thumbs", params))
cm.append(self.create_menu(30313, "play_yt", params))
cm.append(self.create_menu(30311, "search_yt", params))
cm.append(self.create_menu(30310, "start_radio", params))
return cm
def getRadioContextMenu(self, name, radio_id):
params = {'radio_id': radio_id, 'title': name}
shuffle = params.copy()
shuffle.update({'shuffle': 'true'})
return [
self.create_menu(30301, "play_all", params),
self.create_menu(30302, "play_all", shuffle),
self.create_menu(30312, "play_all_yt", params),
self.create_menu(30321, "play_all_yt", shuffle),
self.create_menu(30306, "add_favourite", {'radio_id': radio_id, 'title': name, 'path': 'playlist'}),
self.create_menu(30315, "add_to_queue", params),
self.create_menu(30318, "delete_station", params)
]
def getPlayAllContextMenu(self, name, playlist, token=None):
params = {'playlist_id': playlist, 'title': name}
shuffle = params.copy()
shuffle.update({'shuffle': 'true'})
cm = [
self.create_menu(30301, "play_all", params),
self.create_menu(30302, "play_all", shuffle),
            self.create_menu(30312, "play_all_yt", params),
self.create_menu(30321, "play_all_yt", shuffle),
self.create_menu(30306, "add_favourite", {'playlist_id': playlist, 'title': name, 'path': 'playlist'}),
self.create_menu(30315, "add_to_queue", params),
self.create_menu(30317, "delete_playlist", params)
]
if token:
cm.append(self.create_menu(30310, "start_radio", {'playlist_id': playlist, 'title': name, 'token': token}))
return cm
def getFilterContextMenu(self, filter_type, filter_criteria, artist=''):
params = {'filter_type': filter_type, 'filter_criteria': filter_criteria, 'artist': artist}
shuffle = params.copy()
shuffle.update({'shuffle': 'true'})
return [
self.create_menu(30301, "play_all", params),
self.create_menu(30302, "play_all", shuffle),
self.create_menu(30312, "play_all_yt", params),
self.create_menu(30321, "play_all_yt", shuffle),
self.create_menu(30306, "add_favourite", {'path': filter_type, 'name': filter_criteria, 'title': filter_criteria}),
self.create_menu(30315, "add_to_queue", params),
self.create_menu(30208, "search", params),
]
def create_menu(self, text_code, action, params={'1':1}):
return self.lang(text_code), self.contextmenu_action % (action, urlencode(params, doseq=True))
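    # Illustrative example of what create_menu() returns (the add-on URL is a
    # placeholder, not taken from the source):
    #   ("Play all", "XBMC.RunPlugin(plugin://<addon_id>/?action=play_all&playlist_id=PLS1)")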
def getSearch(self, query):
listItems = []
def listAlbumsResults():
for album in result['albums']:
if 'albumId' in album:
listItems.extend(self.createAlbumFolder([album]))
else:
params = {'path': 'album', 'album': utils.tryEncode(album['name']), 'artist': utils.tryEncode(album['artist'])}
cm = self.getFilterContextMenu('album', album['name'])
folder_name = "[%s] %s" % (album['artist'], album['name'])
listItems.append(self.createFolder(folder_name, params, cm, album['albumart'], album['artistart']))
def listArtistsResults():
cm = []
for artist in result['artists']:
params = {'path': 'artist', 'name': utils.tryEncode(artist['name'])}
if 'artistId' in artist:
params = {'path': 'search_result', 'artistid': artist['artistId'], 'query': utils.tryEncode(artist['name'])}
cm = [self.create_menu(30301, "play_all", {'artist_id': artist['artistId']})]
art = artist['artistArtRef']
listItems.append(self.createFolder(artist['name'], params, cm, arturl=art, fanarturl=art))
if isinstance(query, str):
result = self.api.getSearch(query)
if result['artists']:
listItems.append(self.createFolder('[COLOR orange]*** ' + self.lang(30205) + ' ***[/COLOR] +>',
{'path': 'search_result', 'type': 'artist', 'query': query}))
listArtistsResults()
if result['albums']:
listItems.append(self.createFolder('[COLOR orange]*** ' + self.lang(30206) + ' ***[/COLOR] +>',
{'path': 'search_result', 'type': 'album', 'query': query}))
listAlbumsResults()
if result['tracks']:
listItems.append(self.createFolder('[COLOR orange]*** ' + self.lang(30213) + ' ***[/COLOR] +>',
{'path': 'search_result', 'type': 'track', 'query': query}))
listItems.extend(self.addSongsFromLibrary(result['tracks'], 'library'))
if result['stations']:
listItems.append(
self.createFolder('[COLOR orange]*** ' + self.lang(30203) + ' ***[/COLOR]', {'path': 'none'}))
listItems.extend(self.getCategoryStations(result['stations']))
if result['videos']:
listItems.append(self.createFolder('[COLOR orange]*** Youtube ***[/COLOR]', {'path': 'none'}))
for video in result['videos']:
listItems.append(
self.createFolder(video['title'], {'action': 'play_yt', 'display_name': video['title']}))
elif 'artistid' in query:
result = self.api.getArtistInfo(query['artistid'], True, 20, 0)
if result['albums']:
listItems.append(
self.createFolder('[COLOR orange]*** ' + self.lang(30206) + ' ***[/COLOR]', {'path': 'none'}))
listAlbumsResults()
listItems.append(
self.createFolder('[COLOR orange]*** ' + self.lang(30213) + ' ***[/COLOR]', {'path': 'none'}))
listItems.extend(self.addSongsFromLibrary(result['tracks'], 'library'))
elif 'type' in query:
result = self.api.getSearch(query['query'], max_results=50)
if query['type'] == 'artist':
listArtistsResults()
elif query['type'] == 'album':
listAlbumsResults()
elif query['type'] == 'track':
listItems.extend(self.addSongsFromLibrary(result['tracks'], 'library'))
else:
listItems.extend(self.getSearch(query['query']))
return listItems
| gpl-3.0 | 4,076,895,093,247,734,000 | 48.290373 | 156 | 0.560502 | false |
siavooshpayandehazad/high_level_test_pattern_gen | src/DetPatGen/ALU.py | 1 | 1113 | import numpy
def alu(op1, op2, alu_op, c_in):
if alu_op == "0000": # mov
result = op2
elif alu_op == "0001": # add
result = op1 + op2
elif alu_op == "0010": # sub
result = op1 - op2
elif alu_op == "0011": # cmp
result = op1
elif alu_op == "0100": # and
result = numpy.bitwise_and(op1, op2)
elif alu_op == "0101": # or
result = numpy.bitwise_or(op1, op2)
elif alu_op == "0110": # xor
result = numpy.bitwise_xor(op1, op2)
elif alu_op == "0111": # not
result = numpy.invert(op2)
elif alu_op == "1000": # shl
result = numpy.left_shift(op1,1)
elif alu_op == "1001": # shr
result = numpy.right_shift(op1,1)
elif alu_op == "1010": # asr
result = numpy.bitwise_or(numpy.bitwise_and(op1, 128), numpy.right_shift(op1,1))
elif alu_op == "1011": # inc
result = op1 + 1
elif alu_op == "1100": # dec
result = op1 - 1
elif alu_op == "1101": # rlc
result = numpy.bitwise_or(128*c_in, numpy.right_shift(op1,1))
elif alu_op == "1110": # rrc
result = numpy.bitwise_or(c_in, numpy.left_shift(op1,1))
elif alu_op == "1111": # nop
result = 0
    return result
| gpl-3.0 | 402,474,652,033,670,900 | 29.944444 | 82 | 0.592992 | false |
shoopio/shoop | shuup/gdpr/models.py | 1 | 8040 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import activate, get_language
from parler.models import TranslatableModel, TranslatedFields
from reversion.models import Version
from shuup.gdpr.utils import get_active_consent_pages
from shuup.simple_cms.models import Page
GDPR_ANONYMIZE_TASK_TYPE_IDENTIFIER = "gdpr_anonymize"
@python_2_unicode_compatible
class GDPRSettings(TranslatableModel):
shop = models.OneToOneField("shuup.Shop", related_name="gdpr_settings")
enabled = models.BooleanField(
default=False,
verbose_name=_('enabled'),
help_text=_("Define if the GDPR is active.")
)
skip_consent_on_auth = models.BooleanField(
default=False,
verbose_name=_("skip consent on login"),
help_text=_("Do not require consent on login when GDPR is activated.")
)
privacy_policy_page = models.ForeignKey(
"shuup_simple_cms.Page",
null=True,
verbose_name=_("privacy policy page"),
help_text=_("Choose your privacy policy page here. If this page changes, customers will be "
"prompted for new consent."))
consent_pages = models.ManyToManyField(
"shuup_simple_cms.Page",
verbose_name=_("consent pages"),
related_name="consent_settings",
help_text=_("Choose pages here which are being monitored for customer consent. If any of these pages change"
", the customer is being prompted for a new consent."))
translations = TranslatedFields(
cookie_banner_content=models.TextField(
blank=True,
verbose_name=_("cookie banner content"),
help_text=_("The text to be presented to users in a pop-up warning.")
),
cookie_privacy_excerpt=models.TextField(
blank=True,
verbose_name=_("cookie privacy excerpt"),
help_text=_("The summary text to be presented about cookie privacy.")
),
auth_consent_text=models.TextField(
blank=True,
verbose_name=_("login consent text"),
help_text=_("Shown in login page between the form and the button. "
"Optional but should be considered when the consent on login is disabled.")
)
)
class Meta:
verbose_name = _('GDPR settings')
verbose_name_plural = _('GDPR settings')
def __str__(self):
return _("GDPR for {}").format(self.shop)
def set_default_content(self):
language = get_language()
for code, name in settings.LANGUAGES:
activate(code)
self.set_current_language(code)
self.cookie_banner_content = settings.SHUUP_GDPR_DEFAULT_BANNER_STRING
self.cookie_privacy_excerpt = settings.SHUUP_GDPR_DEFAULT_EXCERPT_STRING
self.save()
self.set_current_language(language)
activate(language)
@classmethod
def get_for_shop(cls, shop):
instance, created = cls.objects.get_or_create(shop=shop)
if created or not instance.safe_translation_getter("cookie_banner_content"):
instance.set_default_content()
return instance
@python_2_unicode_compatible
class GDPRCookieCategory(TranslatableModel):
shop = models.ForeignKey("shuup.Shop", related_name="gdpr_cookie_categories")
always_active = models.BooleanField(default=False, verbose_name=_('always active'))
default_active = models.BooleanField(
verbose_name=_('active by default'),
default=False,
help_text=_('whether this cookie category is active by default')
)
cookies = models.TextField(
verbose_name=_("cookies used"),
help_text=_(
"Comma separated list of cookies names, prefix or suffix "
"that will be included in this category, "
"e.g. _ga, mysession, user_c_"
),
)
translations = TranslatedFields(
name=models.CharField(max_length=64, verbose_name=_("name")),
how_is_used=models.TextField(
verbose_name=_("how we use"),
help_text=_("Describe the purpose of this category of cookies and how it is used."),
blank=True
)
)
class Meta:
verbose_name = _('GDPR cookie category')
verbose_name_plural = _('GDPR cookie categories')
def __str__(self):
return _("GDPR cookie category for {}").format(self.shop)
@python_2_unicode_compatible
class GDPRUserConsent(models.Model):
created_on = models.DateTimeField(
auto_now_add=True,
editable=False,
db_index=True,
verbose_name=_("created on")
)
shop = models.ForeignKey(
"shuup.Shop",
related_name="gdpr_consents",
editable=False
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='gdpr_consents',
on_delete=models.PROTECT,
editable=False
)
documents = models.ManyToManyField(
"GDPRUserConsentDocument",
verbose_name=_("consent documents"),
blank=True,
editable=False
)
class Meta:
verbose_name = _('GDPR user consent')
verbose_name_plural = _('GDPR user consents')
@classmethod
def ensure_for_user(cls, user, shop, consent_documents):
documents = []
for page in consent_documents:
Page.create_initial_revision(page)
version = Version.objects.get_for_object(page).first()
consent_document = GDPRUserConsentDocument.objects.create(
page=page,
version=version
)
documents.append(consent_document)
# ensure only one consent exists for this user in this shop
consent = cls.objects.filter(shop=shop, user=user).first()
if consent:
consents = cls.objects.filter(shop=shop, user=user).order_by("-created_on")
if consents.count() > 1:
# There are multiple consents, remove excess
ids = [c.id for c in consents.all() if c.id != consent.id]
cls.objects.filter(pk__in=ids).delete()
else:
consent = cls.objects.create(shop=shop, user=user)
consent.documents = documents
return consent
@classmethod
def get_for_user(cls, user, shop):
return cls.objects.filter(user=user, shop=shop).order_by("-created_on").first()
def should_reconsent(self, shop, user):
consent_pages_ids = set([page.id for page in get_active_consent_pages(shop)])
page_ids = set([doc.page.id for doc in self.documents.all()])
if consent_pages_ids != page_ids:
return True
# all matches, check versions
for consent_document in self.documents.all():
version = Version.objects.get_for_object(consent_document.page).first()
if consent_document.version != version:
return True
return False
def should_reconsent_to_page(self, page):
version = Version.objects.get_for_object(page).first()
return not self.documents.filter(page=page, version=version).exists()
def __str__(self):
return _("GDPR user consent in {} for user {} in shop {}").format(self.created_on, self.user, self.shop)
@python_2_unicode_compatible
class GDPRUserConsentDocument(models.Model):
page = models.ForeignKey("shuup_simple_cms.Page")
version = models.ForeignKey(Version)
def __str__(self):
return _("GDPR user consent document for {} (Version: {})").format(self.page, self.version)
| agpl-3.0 | -4,302,238,486,277,676,000 | 36.050691 | 116 | 0.631219 | false |
numerodix/luna | tests/parser/test_expr.py | 1 | 3573 | from luna.ast import *
## Highest to lowest precedence: tests for associativity
def test_power1(parse_expr):
assert Expr(
Power(
Number('3'),
Power(
Number('4'),
Number('5'),
),
),
) == parse_expr('3 ^ 4 ^ 5')
def test_unary1(parse_expr):
assert Expr(
UnaryOp(
Operator('-'),
Number('4'),
),
) == parse_expr('- 4')
def test_term1(parse_expr):
assert Expr(
Term(
Term(
Number('2'),
Operator('/'),
Number('4'),
),
Operator('*'),
Number('8'),
),
) == parse_expr('2 / 4 * 8')
def test_arith1(parse_expr):
assert Expr(
Arith(
Arith(
Number('2'),
Operator('+'),
Number('4'),
),
Operator('-'),
Number('8'),
),
) == parse_expr('2 + 4 - 8')
def test_concat1(parse_expr):
assert Expr(
Concat(
Number('2'),
Concat(
Number('8'),
Number('19'),
),
),
) == parse_expr('2..8..19')
def test_cmp1(parse_expr):
assert Expr(
Cmp(
Cmp(
Number('2'),
Operator('<'),
Number('3'),
),
Operator('>='),
Number('1'),
),
) == parse_expr('2 < 3 >= 1')
def test_and1(parse_expr):
assert Expr(
And(
And(
Number('1'),
Number('1'),
),
Number('2'),
),
) == parse_expr('1 and 1 and 2')
def test_or1(parse_expr):
assert Expr(
Or(
Or(
Number('1'),
Number('1'),
),
Number('2'),
),
) == parse_expr('1 or 1 or 2')
## Precedence
def test_mixedops1(parse_expr):
assert Expr(
Arith(
Term(
Number('5'),
Operator('*'),
Power(
Number('4'),
Number('2'),
),
),
Operator('+'),
Number('9'),
),
) == parse_expr('5 * 4 ^ 2 + 9')
def test_mixedops2(parse_expr):
assert Expr(
Term(
Expr(
Arith(
Number('1'),
Operator('+'),
Number('3'),
),
),
Operator('*'),
Number('4'),
),
) == parse_expr('(1 + 3) * 4')
def test_mixedops3(parse_expr):
assert Expr(
Or(
And(
UnaryOp(
Operator('not'),
Number('3'),
),
Number('3'),
),
Cmp(
Number('4'),
Operator('>'),
Number('4'),
),
),
) == parse_expr('not 3 and 3 or 4 > 4')
## Funccalls
def test_funcall1(parse_expr):
assert Expr(
Call(
Identifier('print'),
Args(
Identifier('x'),
),
),
) == parse_expr('print(x)')
def test_funcall2(parse_expr):
assert Expr(
Call(
Identifier('print'),
Args(
parse_expr('x and y'),
parse_expr('z'),
),
),
) == parse_expr('print(x and y, z)')
| mit | 6,562,127,079,191,814,000 | 18.85 | 56 | 0.34005 | false |
jowolf/thelibregroup | fabfile.py | 1 | 16139 | import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
################
# Config setup #
################
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", # POSIX
"fab-script.py"): # Windows
# Ensure we import settings from the current dir
try:
conf = __import__("settings", globals(), locals(), [], 0).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print "Aborting, no hosts defined."
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % (env.venv_path,
env.venv_path)
env.live_host = conf.get("LIVE_HOSTNAME", env.hosts[0] if env.hosts else None)
env.repo_url = conf.get("REPO_URL", "")
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if their
# contents has changed, in which case, the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl reload",
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/live_settings.py",
"remote_path": "%(proj_path)s/local_settings.py",
},
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_dirname):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print
print output
print
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True):
"""
Runs a shell comand on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command)
@task
def sudo(command, show=True):
"""
Runs a command as sudo.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
        header = "-" * len(func.__name__)
        _print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
Uploads a template only if it has changed, and if so, reload a
related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
@task
def nginx_config():
"""
Installs nginx config from template
"""
return upload_template_and_reload ('nginx')
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return sudo("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the database.
"""
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
"""
Restores the database.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print settings.STATIC_ROOT", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
locale = "LC_ALL=%s" % env.locale
with hide("stdout"):
if locale not in sudo("cat /etc/default/locale"):
sudo("update-locale %s" % locale)
run("exit")
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor")
sudo("easy_install pip")
sudo("pip install virtualenv mercurial")
@task
@log_call
def create():
"""
Create a new virtual environment for a project.
Pulls the project's repo from version control, adds system-level
configs for the project, and initialises the database with the
live host.
"""
# Create virtualenv
with cd(env.venv_home):
if exists(env.proj_name):
prompt = raw_input("\nVirtualenv exists: %s\nWould you like "
"to replace it? (yes/no) " % env.proj_name)
if prompt.lower() != "yes":
print "\nAborting!"
return False
remove()
run("virtualenv %s --distribute" % env.proj_name)
vcs = "git" if env.git else "hg"
run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path))
# Create DB and DB user.
pw = db_pass()
user_sql_args = (env.proj_name, pw.replace("'", "\'"))
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
# Set up SSL certificate.
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.live_host)
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
# Set up project.
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle south psycopg2 "
"django-compressor python-memcached")
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"site, _ = Site.objects.get_or_create(id=settings.SITE_ID);"
"site.domain = '" + env.live_host + "';"
"site.save();")
if env.admin_pass:
pw = env.admin_pass
user_py = ("from mezzanine.utils.models import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
psql("DROP DATABASE %s;" % env.proj_name)
psql("DROP USER %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
sudo("kill -HUP `cat %s`" % pid_path)
else:
start_args = (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Check out the latest version of the project from version
control, install new requirements, sync and migrate the database,
    collect any new static assets, and restart gunicorn's worker
processes for the project.
"""
if not exists(env.venv_path):
prompt = raw_input("\nVirtualenv doesn't exist: %s\nWould you like "
"to create it? (yes/no) " % env.proj_name)
if prompt.lower() != "yes":
print "\nAborting!"
return False
create()
for name in get_templates():
upload_template_and_reload(name)
with project():
backup("last.db")
static_dir = static()
if exists(static_dir):
run("tar -cf last.tar %s" % static_dir)
git = env.git
last_commit = "git rev-parse HEAD" if git else "hg id -i"
run("%s > last.commit" % last_commit)
with update_changed_requirements():
run("git pull origin master -f" if git else "hg pull && hg up -C")
manage("collectstatic -v 0 --noinput")
manage("syncdb --noinput")
manage("migrate --noinput")
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the last commit checked out, the database,
and all static files. Calling rollback will revert all of these to
their state prior to the last deploy.
"""
with project():
with update_changed_requirements():
update = "git checkout" if env.git else "hg up -C"
run("%s `cat last.commit`" % update)
with cd(join(static(), "..")):
run("tar -xf %s" % join(env.proj_path, "last.tar"))
restore("last.db")
restart()
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
if create():
deploy()
| agpl-3.0 | 8,274,986,944,124,153,000 | 28.831793 | 78 | 0.564719 | false |
Cadasta/cadasta-qgis-plugin | cadasta/gui/tools/utilities/edit_text_dialog.py | 1 | 4651 | # coding=utf-8
"""
Cadasta Widget -**Edit Text Dialog**
This module provides: Login : Login for cadasta and save authnetication
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import logging
from qgis.PyQt.QtCore import pyqtSignal
from qgis.PyQt.QtGui import (
QDialog
)
from PyQt4.QtCore import QUrl, QRegExp, Qt
from PyQt4.QtGui import (
QDesktopServices,
QColor,
QTextCharFormat,
QFont,
QSyntaxHighlighter
)
from cadasta.utilities.resources import get_ui_class
__copyright__ = "Copyright 2016, Cadasta"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('CadastaQGISPlugin')
FORM_CLASS = get_ui_class(os.path.join(
'utilities', 'edit_text_dialog.ui'))
class EditTextDialog(QDialog, FORM_CLASS):
"""Dialog for just contains edit text
"""
edit_text_done = pyqtSignal()
def __init__(self, parent=None, iface=None, text=""):
"""Constructor for the dialog.
.. note:: In QtDesigner the advanced editor's predefined keywords
list should be shown in english always, so when adding entries to
cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
the :safe_qgis:`translatable` property.
:param parent: Parent widget of this dialog.
:type parent: QWidget
:param iface: QGIS QGisAppInterface instance.
:type iface: QGisAppInterface
:param text: Default text to be shown
:type text: str
"""
QDialog.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle('Cadasta Questionnaire')
self.highlighter = Highlighter(self.edit_text.document())
self.show()
self.edit_text.setPlainText(text)
self.ok_button.clicked.connect(
self.close_edit_text_dialog
)
self.data_schema_help.mousePressEvent = self.show_advanced_help
def show_advanced_help(self, event):
"""Show advanced help
"""
QDesktopServices().openUrl(
QUrl("https://cadasta.github.io/api-docs/#questionnaires"))
def close_edit_text_dialog(self):
"""Function that call when ok button is clicked.
"""
self.edit_text_done.emit()
self.close()
def get_text(self):
"""Getting current text in edit text.
:return: edited text
:rtype: str
"""
return self.edit_text.toPlainText()
class Highlighter(QSyntaxHighlighter):
def __init__(self, parent=None):
super(Highlighter, self).__init__(parent)
self.highlighting_rules = []
value_format = QTextCharFormat()
value_format.setForeground(Qt.darkRed)
self.highlighting_rules.append((
QRegExp("\\btrue\\b|\\bnull\\b|\\bfalse\\b|\\b[0-9]+\\b"),
value_format
))
quotation_format = QTextCharFormat()
quotation_format.setForeground(Qt.darkGreen)
self.highlighting_rules.append((QRegExp("\".*\""),
quotation_format))
self.comment_start_expression = QRegExp("/\\*")
self.comment_end_expression = QRegExp("\\*/")
def highlightBlock(self, text):
for pattern, highlight_format in self.highlighting_rules:
expression = QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, highlight_format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
start_index = 0
if self.previousBlockState() != 1:
start_index = self.comment_start_expression.indexIn(text)
while start_index >= 0:
end_index = self.comment_end_expression.indexIn(text, start_index)
if end_index == -1:
self.setCurrentBlockState(1)
comment_length = len(text) - start_index
else:
comment_length = end_index - start_index + \
self.comment_end_expression.matchedLength()
start_index = self.comment_start_expression.indexIn(
text,
start_index + comment_length)
| gpl-3.0 | -5,289,462,695,124,991,000 | 31.298611 | 78 | 0.614061 | false |
poppogbr/genropy | tutorial/projects/warhammer/packages/warh/webpages/carriere.py | 1 | 2502 | #!/usr/bin/env python
# encoding: utf-8
"""
Created by Softwell on 2010-05-15.
Copyright (c) 2008 Softwell. All rights reserved.
"""
class GnrCustomWebPage(object):
maintable = 'warh.carriera'
py_requires = 'public:Public,standard_tables:TableHandlerLight,public:IncludedView'
######################## STANDARD TABLE OVERRIDDEN METHODS ################
def windowTitle(self):
return '!!Carriere personaggi'
def barTitle(self):
return '!!Carriere personaggi'
def lstBase(self, struct):
r = struct.view().rows()
r.fieldcell('nome', width='11em')
r.fieldcell('ac', width='5em')
r.fieldcell('ab', width='5em')
r.fieldcell('forza', width='3em')
r.fieldcell('resistenza', width='5em')
r.fieldcell('agilita', width='3em')
r.fieldcell('intelligenza', width='5em')
r.fieldcell('volonta', width='4em')
r.fieldcell('simpatia', width='4em')
r.fieldcell('attacchi', width='4em')
r.fieldcell('ferite', width='3em')
r.fieldcell('bonus_forza', width='3em')
r.fieldcell('bonus_res', width='5em')
r.fieldcell('mov', width='5em')
r.fieldcell('magia', width='3em')
r.fieldcell('follia', width='5em')
r.fieldcell('fato', width='5em')
return struct
def printActionBase(self):
return True
def exportActionBase(self):
return True
def orderBase(self):
return 'nome'
def queryBase(self):
return dict(column='nome', op='contains', val='')
def userCanWrite(self):
return True
def userCanDelete(self):
return True
############################## FORM METHODS ##################################
def formBaseDimension(self):
return dict(height='220px', width='800px')
def formBase(self, parentBC, disabled=False, **kwargs):
pane = parentBC.contentPane(**kwargs)
fb = pane.formbuilder(cols=8, border_spacing='4px', fld_width='2em')
fb.field('nome', width='12em', colspan=8)
fb.field('ac')
fb.field('ab')
fb.field('forza')
fb.field('resistenza')
fb.field('agilita')
fb.field('intelligenza')
fb.field('volonta')
fb.field('simpatia')
fb.field('attacchi')
fb.field('ferite')
fb.field('bonus_forza')
fb.field('bonus_res')
fb.field('mov')
fb.field('magia')
fb.field('follia')
fb.field('fato') | lgpl-2.1 | -5,609,397,692,937,491,000 | 29.52439 | 87 | 0.564349 | false |
davidh-ssec/polar2grid | polar2grid/readers/amsr2_l1b.py | 1 | 6911 | #!/usr/bin/env python3
# encoding: utf-8
# Copyright (C) 2016 Space Science and Engineering Center (SSEC),
# University of Wisconsin-Madison.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file is part of the polar2grid software package. Polar2grid takes
# satellite observation data, remaps it, and writes it to a file format for
# input into another program.
# Documentation: http://www.ssec.wisc.edu/software/polar2grid/
#
# Written by David Hoese August 2016
# University of Wisconsin-Madison
# Space Science and Engineering Center
# 1225 West Dayton Street
# Madison, WI 53706
# [email protected]
"""AMSR2 L1B files contain various parameters from the GCOM-W1 AMSR2
instrument. This reader can be used by specifying the reader name
``amsr2_l1b`` to the ``polar2grid.sh`` script.
Supported files usually have the following naming scheme::
GW1AM2_201607201808_128A_L1DLBTBR_1110110.h5
This reader's default remapping algorithm is ``nearest`` for nearest
neighbor resampling due to the instrument's scan pattern and swath shape.
The ``--distance_upper_bound`` flag defaults to 12.
Currently this reader provides only the following datasets:
+---------------------------+-----------------------------------------------------------+
| Product Name | Description |
+===========================+===========================================================+
| btemp_36.5v | Brightness Temperature 36.5GHz Polarization Vertical |
+---------------------------+-----------------------------------------------------------+
| btemp_36.5h | Brightness Temperature 36.5GHz Polarization Horizontal |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0av | Brightness Temperature 89.0GHz A Polarization Vertical |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0ah | Brightness Temperature 89.0GHz A Polarization Horizontal |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0bv | Brightness Temperature 89.0GHz B Polarization Vertical |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0bh | Brightness Temperature 89.0GHz B Polarization Horizontal |
+---------------------------+-----------------------------------------------------------+
Special AMSR2 Naval Research Lab (NRL) PNG Scaling
--------------------------------------------------
A common use case for the AMSR2 L1B reader is to generate PNG images similar
to those generated by the U.S. Naval Research Lab (NRL) with a colormap and
coastlines. This requires using an alternate non-default scaling configuration
provided in the tarball. It can be used by providing the
``--rescale-configs $POLAR2GRID_HOME/rescale_configs/amsr2_png.ini``
flag when generating AMSR2 L1B GeoTIFFs.
Once this rescaling has been done, colormap files can be found in
``$POLAR2GRID_HOME/colormaps`` which can then be applied using the
`add_colormap.sh` script.
See the :ref:`util_add_coastlines` and :ref:`util_add_colormap` documentation
for more information on generating these NRL-like PNGs.
"""
__docformat__ = "restructuredtext en"
import sys
import logging
from polar2grid.readers import ReaderWrapper, main
LOG = logging.getLogger(__name__)
DEFAULT_CHANNELS = [
# "btemp_10.7v",
# "btemp_10.7h",
"btemp_36.5v",
"btemp_36.5h",
"btemp_89.0av",
"btemp_89.0ah",
"btemp_89.0bv",
"btemp_89.0bh",
]
class Frontend(ReaderWrapper):
FILE_EXTENSIONS = [".h5"]
DEFAULT_READER_NAME = "amsr2_l1b"
DEFAULT_DATASETS = DEFAULT_CHANNELS
PRIMARY_FILE_TYPE = "amsr2_l1b"
def add_frontend_argument_groups(parser):
"""Add command line arguments to an existing parser.
:returns: list of group titles added
"""
from polar2grid.core.script_utils import ExtendAction
# Set defaults for other components that may be used in polar2grid processing
parser.set_defaults(fornav_D=10, fornav_d=1, remap_method="nearest", distance_upper_bound=12)
# Use the append_const action to handle adding products to the list
group_title = "Frontend Initialization"
group = parser.add_argument_group(title=group_title, description="swath extraction initialization options")
group.add_argument("--list-products", dest="list_products", action="store_true",
help="List available frontend products and exit")
# group.add_argument("--no-tc", dest="use_terrain_corrected", action="store_false",
# help="Don't use terrain-corrected navigation")
# group.add_argument("--day-fraction", dest="day_fraction", type=float, default=float(os.environ.get("P2G_DAY_FRACTION", 0.10)),
# help="Fraction of day required to produce reflectance products (default 0.10)")
# group.add_argument("--night-fraction", dest="night_fraction", type=float, default=float(os.environ.get("P2G_NIGHT_FRACTION", 0.10)),
# help="Fraction of night required to product products like fog (default 0.10)")
# group.add_argument("--sza-threshold", dest="sza_threshold", type=float, default=float(os.environ.get("P2G_SZA_THRESHOLD", 100)),
# help="Angle threshold of solar zenith angle used when deciding day or night (default 100)")
# group.add_argument("--dnb-saturation-correction", action="store_true",
# help="Enable dynamic DNB saturation correction (normally used for aurora scenes)")
group_title = "Frontend Swath Extraction"
group = parser.add_argument_group(title=group_title, description="swath extraction options")
group.add_argument("-p", "--products", dest="products", nargs="+", default=None, action=ExtendAction,
help="Specify frontend products to process")
return ["Frontend Initialization", "Frontend Swath Extraction"]
if __name__ == "__main__":
sys.exit(main(description="Extract VIIRS L1B swath data into binary files",
add_argument_groups=add_frontend_argument_groups))
| gpl-3.0 | 9,032,680,333,966,982,000 | 49.816176 | 138 | 0.620605 | false |
senuido/stash-scanner | lib/ItemHelper.py | 1 | 21784 | import itertools
import re
from enum import IntEnum, Enum
from array import array
from lib.CurrencyManager import cm
from lib.ItemCollection import ItemCollection
from lib.Utility import logger, dround
from lib.ItemClass import ItemClass, dir_to_id
float_expr = '[0-9]+|[0-9]+\s*\.\s*[0-9]+'
_BO_PRICE_REGEX = re.compile('.*~(?:b/o|price)({num})(?:[/\\\\]({num}))?([a-z\-]+)'.format(num=float_expr))
# _BO_PRICE_REGEX = re.compile('.*~(b/o|price)\s+([0-9]+|[0-9]+\.[0-9]+)\s+([a-z\-]+)')
_LOCALIZATION_REGEX = re.compile("<<.*>>")
superior_expr = re.compile('^Superior ')
dir_expr = re.compile(r'.*2DItems[/\\](.*)')
expr_level = re.compile('([0-9]+).*')
phys_expr = re.compile('([0-9]+)% increased Physical Damage$')
es_expr = re.compile('([0-9]+)% increased (?!maximum).*Energy Shield$')
armour_expr = re.compile('([0-9]+)% increased Armour(?! during).*$')
evasion_expr = re.compile('([0-9]+)% increased .*Evasion(?: Rating)?(?!.*during).*$')
life_expr = re.compile('([\-+][0-9]+) to maximum Life$')
strength_expr = re.compile('([\-+][0-9]+) to Strength')
dex_expr = re.compile('([\-+][0-9]+) to .*Dexterity$')
int_expr = re.compile('([\-+][0-9]+) to .*Intelligence$')
attributes_expr = re.compile('([\-+][0-9]+) to all Attributes$')
str_mods = [strength_expr, attributes_expr]
cold_res_expr = re.compile('([\-+][0-9]+)% to(?: Fire and)? Cold(?: and Lightning)? Resistances?$')
fire_res_expr = re.compile('([\-+][0-9]+)% to Fire(?: and (?:Cold|Lightning))? Resistances?$')
lightning_res_expr = re.compile('([\-+][0-9]+)% to(?: (?:Cold|Fire) and)? Lightning Resistances?$')
chaos_res_expr = re.compile('([\-+][0-9]+)% to Chaos Resistance$')
ele_res_expr = re.compile('([\-+][0-9]+)% to all Elemental Resistances$')
def get_price(price):
match = _BO_PRICE_REGEX.match(price.lower().replace(' ', ''))
if match:
num, denom, curr = match.groups()
denom = 1 if denom is None or float(denom) == 0 else float(denom)
amount = float(num) / denom
if amount == 0:
return None
return amount, curr
return None
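# Illustrative behaviour of get_price()/_BO_PRICE_REGEX (made-up note strings):
#   get_price('~b/o 5 chaos')   -> (5.0, 'chaos')
#   get_price('~price 1/2 exa') -> (0.5, 'exa')
#   get_price('WTB cheap maps') -> None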
class Item:
__slots__ = ('_item', 'c_name', 'c_base', 'ilvl', 'links_count', 'corrupted', 'mirrored', 'identified', 'stacksize',
'implicit', 'explicit', 'enchant', 'craft', '_mods', 'sockets_count', 'buyout', 'type',
'crafted', 'enchanted', 'modcount',
'_quality', '_level', '_exp', '_tier',
'price', # price before conversion
'c_price',
'_iclass', 'rarity',
'_armour', '_evasion', '_es', '_life',
'_fres', '_cres', '_lres', '_chres', '_ele_res',
'_aps', '_crit', '_block',
'_dps', '_pdps', '_edps',
'_formatted_properties',
'_strength_bonus', '_dex_bonus', '_int_bonus', '_attributes_bonus')
def __init__(self, item, stash_price):
self._item = item
self.links_count = self._get_item_links_count()
self.sockets_count = len(self.sockets)
self.ilvl = item['ilvl']
self.corrupted = item['corrupted']
self.mirrored = item.get('duplicated', False)
self.identified = item['identified']
self.c_base = self.base.lower()
self.c_name = '{} {}'.format(self._get_name().lower(), self.c_base).strip()
# self.type = _ITEM_TYPE[item['frameType']]
self.type = item['frameType']
self.rarity = self.get_rarity()
self.stacksize = item.get("stackSize", 1)
self.price = self.get_price(stash_price)
self.c_price = cm.convert(*self.price) if self.price is not None else None
self.buyout = self.c_price is not None and self.c_price > 0
self.implicit = self._item.get('implicitMods', [])
self.explicit = self._item.get('explicitMods', [])
self.enchant = self._item.get('enchantMods', [])
self.craft = self._item.get('craftedMods', [])
self._mods = None
self.crafted = len(self.craft) > 0
self.enchanted = len(self.enchant) > 0
self.modcount = len(self.implicit) + len(self.explicit) + len(self.enchant) + len(self.craft)
# Properties and on-demand computed fields
self._iclass = None
self._quality = None
self._level = None
self._exp = None
self._tier = None
self._es = None
self._armour = None
self._evasion = None
self._aps = None
self._crit = None
self._block = None
self._edps = None
self._pdps = None
self._dps = None
self._attributes_bonus = None
self._strength_bonus = None
self._dex_bonus = None
self._int_bonus = None
self._life = None
self._fres = None
self._cres = None
self._lres = None
self._chres = None
self._ele_res = None
self._formatted_properties = None
@property
def mods(self):
if self._mods is None:
self._mods = list(itertools.chain(self.explicit, self.implicit, self.enchant, self.craft))
return self._mods
@property
def modifiable(self):
return not (self.corrupted or self.mirrored)
@property
def iclass(self):
if self._iclass is None:
self._iclass = self.get_item_class()
return self._iclass
@property
def quality(self):
if self._quality is None:
quality = self.get_prop_value('Quality')
self._quality = int(quality[0][0].strip('+%')) if quality else 0
return self._quality
@property
def level(self):
if self._level is None:
level = self.get_prop_value('Level')
self._level = float(level[0][0].split()[0]) if level else 0
return self._level
@property
def tier(self):
if self._tier is None:
tier = self.get_prop_value('Map Tier')
self._tier = int(tier[0][0]) if tier else 0
return self._tier
@property
def exp(self):
if self._exp is None:
exp = self.get_item_prop('Experience')
self._exp = float(exp['progress']) * 100 if exp else 0
return self._exp
@property
def es(self):
if self._es is None:
val = self.get_prop_value('Energy Shield')
self._es = self.get_item_es(self.quality, self.modifiable,
self.mods, float(val[0][0])) if val else 0
return self._es
@property
def armour(self):
if self._armour is None:
armour = self.get_prop_value('Armour')
self._armour = self.get_item_armour(self.quality, self.modifiable,
self.mods, float(armour[0][0])) if armour else 0
return self._armour
@property
def evasion(self):
if self._evasion is None:
val = self.get_prop_value('Evasion Rating')
self._evasion = self.get_item_evasion(self.quality, self.modifiable,
self.mods, float(val[0][0])) if val else 0
return self._evasion
@property
def edps(self):
if self._edps is None:
self._fill_dps()
return self._edps
@property
def pdps(self):
if self._pdps is None:
self._fill_dps()
return self._pdps
@property
def dps(self):
if self._dps is None:
self._fill_dps()
return self._dps
@property
def aps(self):
if self._aps is None:
aps = self.get_prop_value('Attacks per Second')
self._aps = float(aps[0][0]) if aps else 0
return self._aps
@property
def crit(self):
if self._crit is None:
crit = self.get_prop_value('Critical Strike Chance')
self._crit = float(crit[0][0].strip('%')) if crit else 0
return self._crit
@property
def block(self):
if self._block is None:
block = self.get_prop_value('Chance to Block')
self._block = float(block[0][0].strip('%')) if block else 0
return self._block
def _fill_dps(self):
if self.aps:
pavg, eavg, cavg = self.get_prop_value('Physical Damage'), \
self.get_prop_value('Elemental Damage'), self.get_prop_value('Chaos Damage')
if pavg:
pavg = sum((float(i) for i in pavg[0][0].split('-'))) / 2
self._pdps = self.get_item_pdps(self.quality, self.modifiable, self.mods, pavg, self.aps)
else:
self._pdps = 0
self._edps = sum((float(i) for i in eavg[0][0].split('-'))) / 2 * self.aps if eavg else 0
cavg = sum((float(i) for i in cavg[0][0].split('-')))/2 if cavg else 0
self._dps = self._pdps + self._edps + cavg * self.aps
else:
self._dps = 0
self._pdps = 0
self._edps = 0
@property
def fres(self):
if self._fres is None:
self._fres = Item.get_mod_total(fire_res_expr, self.mods)
return self._fres
@property
def cres(self):
if self._cres is None:
self._cres = Item.get_mod_total(cold_res_expr, self.mods)
return self._cres
@property
def lres(self):
if self._lres is None:
self._lres = Item.get_mod_total(lightning_res_expr, self.mods)
return self._lres
@property
def chres(self):
if self._chres is None:
self._chres = Item.get_mod_total(chaos_res_expr, self.mods)
return self._chres
@property
def ele_res(self):
if self._ele_res is None:
self._ele_res = Item.get_mod_total(ele_res_expr, self.mods)
return self._ele_res
@property
def strength_bonus(self):
if self._strength_bonus is None:
self._strength_bonus = Item.get_mod_total(strength_expr, self.mods)
return self._strength_bonus
@property
def dex_bonus(self):
if self._dex_bonus is None:
self._dex_bonus = Item.get_mod_total(dex_expr, self.mods)
return self._dex_bonus
@property
def int_bonus(self):
if self._int_bonus is None:
self._int_bonus = Item.get_mod_total(int_expr, self.mods)
return self._int_bonus
@property
def attributes_bonus(self):
if self._attributes_bonus is None:
self._attributes_bonus = Item.get_mod_total(attributes_expr, self.mods)
return self._attributes_bonus
@property
def life(self):
if self._life is None:
self._life = Item.get_mod_total(life_expr, self.mods)
return self._life
@property
def formatted_properties(self):
if self._formatted_properties is None:
self._formatted_properties = \
[ItemProperty.format_property(prop['name'], prop['values'])
for prop in self.properties
if prop['displayMode'] == PropDisplayMode.Format]
return self._formatted_properties
@property
def prophecy(self):
return self._item.get('prophecyText', '')
@property
def w(self):
return self._item['w']
@property
def h(self):
return self._item['h']
@property
def x(self):
return self._item['x']
@property
def y(self):
return self._item['y']
@property
def league(self):
return self._item['league']
@property
def utility(self):
return self._item.get('utilityMods', [])
@property
def icon(self):
return self._item['icon']
@property
def requirements(self):
return self._item.get('requirements', [])
@property
def properties(self):
return self._item.get('properties', [])
@property
def additional_properties(self):
return self._item.get('additionalProperties', [])
@property
def note(self):
return self._item.get('note', '')
@property
def base(self):
return _LOCALIZATION_REGEX.sub('', self._item['typeLine'])
@property
def name(self):
return '{} {}'.format(self._get_name(), self.base).strip()
@property
def sockets(self):
return self._item['sockets']
@property
def id(self):
return self._item['id']
def _get_name(self):
return _LOCALIZATION_REGEX.sub('', self._item['name'])
def _get_item_links_count(self):
groups = array('I', [0]) * 6
for socket in self.sockets:
groups[socket['group']] += 1
return max(groups)
def get_item_prop(self, name):
for prop in itertools.chain(self.properties, self.additional_properties):
if prop['name'] == name:
return prop
return None
def get_prop_value(self, name):
prop = self.get_item_prop(name)
if prop:
return prop['values']
return None
# def get_property_value(self, name):
# vals = get_prop_value(self._item, name)
# if vals:
# vals = [val[0] for val in vals]
# return vals
def get_rarity(self):
try:
return ItemRarity(self.type)
except ValueError:
return ItemRarity.Normal
@staticmethod
def get_mod_total(expr, mods, skip_vals=False):
total = 0
matched = False
if not expr.groups:
skip_vals = True
for mod in mods:
match = expr.match(mod)
if match:
if skip_vals:
return 1
matched = True
for val in match.groups():
total += float(val)
# return total / expr.groups
if matched:
return total / expr.groups
return 0
        # returning None here instead would allow callers to distinguish "no matching mod" from a total of 0
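    # e.g. (made-up mod lines matching the patterns defined at module level):
    #   get_mod_total(fire_res_expr, ['+30% to Fire Resistance',
    #                                 '+12% to Fire and Cold Resistances'])  -> 42.0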
def get_item_links_string(self):
links = ''
link_group = None
for socket in self.sockets:
if link_group is not None:
links += '-' if link_group == socket['group'] else ' '
links += socket['attr']
link_group = socket['group']
return links
@staticmethod
def get_item_pdps(quality, modifiable, mods, pavg, aps):
if not modifiable or quality == 20:
return pavg * aps
total = 0
for mod in mods:
match = phys_expr.match(mod)
if match:
total += float(match.group(1))
return pavg * (120 + total) / (quality + 100 + total) * aps
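    # Worked example of the 20%-quality normalisation above (made-up numbers):
    # a 10%-quality weapon with 90% increased Physical Damage, 100 average
    # physical damage and 1.5 APS projects to 100 * (120 + 90) / (10 + 100 + 90)
    # * 1.5 = 157.5 physical DPS. The same scheme is used for ES/armour/evasion.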
@staticmethod
def get_item_es(quality, modifiable, mods, es):
if not modifiable or quality == 20:
return es
total = 0
for mod in mods:
match = es_expr.match(mod)
if match:
total += float(match.group(1))
return es * (120 + total) / (quality + 100 + total)
@staticmethod
def get_item_armour(quality, modifiable, mods, armour):
if not modifiable or quality == 20:
return armour
total = 0
for mod in mods:
match = armour_expr.match(mod)
if match:
total += float(match.group(1))
return armour * (120 + total) / (quality + 100 + total)
@staticmethod
def get_item_evasion(quality, modifiable, mods, evasion):
if not modifiable or quality == 20:
return evasion
total = 0
for mod in mods:
match = evasion_expr.match(mod)
if match:
total += float(match.group(1))
return evasion * (120 + total) / (quality + 100 + total)
def get_item_class(self):
global superior_expr
base_line = superior_expr.sub('', self.base, 1)
item_class = ItemClass(0)
try:
            # this will fail for magic items with affixes since we don't strip those
item_class = ItemClass[ItemCollection.base_type_to_id[base_line]]
except KeyError:
match = dir_expr.match(self.icon)
# seems to be accurate for the remaining cases
if match:
item_dirs = re.split(r'[/\\]', match.group(1))[:-1]
for item_dir in item_dirs:
class_id = dir_to_id.get(item_dir)
if class_id:
item_class = ItemClass[class_id]
break
# not all flasks have a traditional link
elif 'Flask' in base_line:
item_class = ItemClass.Flask
if not item_class:
logger.warn('Failed determining item class. item: {}, base_line: {}, link {}'.format(self.name, base_line, self.icon))
return item_class
def get_item_base(self):
if self.iclass:
bases = ItemCollection.get_base_types_by_class(self.iclass)
typeLine = self._item['typeLine']
for base in bases:
if re.search(r'\b{}\b'.format(base), typeLine):
return base
return None
def get_max_sockets(self):
""" ignores item type, only considers ilvl """
if self.ilvl >= 50:
return 6
if self.ilvl >= 35:
return 5
if self.ilvl >= 25:
return 4
if self.ilvl >= 2:
return 3
return 2
def get_type_max_sockets(self):
iclass = self.iclass
# if self.name in ItemCollection.SIX_LINK_EXCEPTIONS:
# return 6
if (ItemClass.OneHandWeapon | ItemClass.Shield) & iclass == iclass:
return 3
if (ItemClass.BodyArmour | ItemClass.TwoHandWeapon) & iclass == iclass:
return 6
if (ItemClass.Helmet | ItemClass.Boots | ItemClass.Gloves) & iclass == iclass:
return 4
# if iclass & (ItemClass.Ring | ItemClass.Amulet) != 0:
if (ItemClass.Ring | ItemClass.Amulet) & iclass == iclass:
return 1 # Unset Ring, and Black Maw Talisman
return 0
def get_item_price_raw(self):
if get_price(self.note):
return self.note
return None
def get_price(self, stash_price):
price = get_price(self.note)
return price if price is not None else stash_price
def get_price_raw(self, stash_raw_price):
raw_price = self.get_item_price_raw()
if raw_price is not None:
return raw_price
return stash_raw_price
# TODO MOVE?
def get_item_price_whisper(self):
# Returns format of {amount} {currency}
price = self.price
if price is not None:
amount, currency = price
return dround(amount), cm.toWhisper(currency)
return None
# TODO MOVE?
def get_item_price_display(self):
# Returns format of {amount} {currency}
price = self.price
if price is not None:
amount, currency = price
return dround(amount), cm.toFull(currency)
return None
# TODO MOVE?
def get_whisper_msg(self, stash):
template = '@{} Hi, I would like to buy your {}{} listed{} in {} (stash tab \"{}\"; position: left {}, top {})'
price = self.get_item_price_whisper()
price_str = ' for {} {}'.format(*price) if price is not None else ''
# stack_size_str = ''
# stack_size_str = '' if self.stacksize == 1 else str(self.stacksize) + ' '
if self.iclass and ItemClass.Gem & self.iclass == self.iclass:
gem_text = 'level {} {}% '.format(int(self.level), self.quality)
else:
gem_text = ''
return template.format(stash['lastCharacterName'], gem_text, self.name,
price_str, self.league, stash['stash'],
self.x + 1, self.y + 1)
class PropValueType(IntEnum):
WhiteOrPhysical = 0
BlueOrModified = 1
Fire = 4
Cold = 5
Lightning = 6
Chaos = 7
class PropDisplayMode(IntEnum):
Normal = 0
StatReq = 1
Progress = 2
Format = 3
class ItemType(IntEnum):
Normal = 0
Magic = 1
Rare = 2
Unique = 3
Gem = 4
Currency = 5
DivinationCard = 6
QuestItem = 7
Prophecy = 8
Relic = 9
class ItemRarity(IntEnum):
Normal = ItemType.Normal
Magic = ItemType.Magic
Rare = ItemType.Rare
Unique = ItemType.Unique
Relic = ItemType.Relic
class ItemProperty:
class PropertyValue:
def __init__(self, val):
self.val = val[0]
# try:
self.type = PropValueType(val[1])
# except ValueError:
# self.type = PropValueType.WhiteOrPhysical
def __init__(self, prop):
self.values = [ItemProperty.PropertyValue(val) for val in prop.get('values', [])]
# try:
self.display_mode = PropDisplayMode(prop['displayMode'])
# except ValueError:
# self.display_mode = PropDisplayMode.Normal
self.name = prop['name']
self.progress = prop.get('progress')
def format(self):
format_string = re.sub('%[0-9]+', '{}', self.name)
return format_string.format(*[pv.val for pv in self.values])
@staticmethod
def format_property(name, values):
format_string = re.sub('%[0-9]+', '{}', name)
return format_string.format(*[val[0] for val in values])
class ItemSocketType(Enum):
Strength = 'S'
Dexterity = 'D'
Intelligence = 'I'
Generic = 'G'
class ItemSocket:
def __init__(self, socket):
self.type = ItemSocketType(socket['attr'])
self.group = socket['group'] | gpl-3.0 | -404,214,246,043,495,740 | 29.98862 | 130 | 0.54843 | false |
TheCoSMoCompany/biopredyn | Prototype/src/libsbml-5.10.0/examples/python/arrays/CreateArrays2.py | 1 | 2436 | from libsbml import *
arraysNs = ArraysPkgNamespaces();
doc = SBMLDocument(arraysNs);
doc.setPackageRequired("arrays", True);
model = doc.createModel();
# create parameters
param = model.createParameter();
param.setId("n");
param.setValue(10);
param.setConstant(True);
param = model.createParameter();
param.setId("m");
param.setValue(10);
param.setConstant(True);
param = model.createParameter();
param.setId("x");
param.setValue(5.7);
param.setConstant(True);
paramPlugin = param.getPlugin("arrays");
dim = paramPlugin.createDimension();
dim.setId("i");
dim.setSize("n");
param = model.createParameter();
param.setId("y");
param.setConstant(False);
paramPlugin = param.getPlugin("arrays");
dim = paramPlugin.createDimension();
dim.setId("i");
dim.setSize("n");
param = model.createParameter();
param.setId("z");
param.setConstant(False);
paramPlugin = param.getPlugin("arrays");
dim = paramPlugin.createDimension();
dim.setId("i");
dim.setSize("n");
# create initial assignments
assignment = model.createInitialAssignment();
assignment.setSymbol("y");
ast = ASTNode(AST_REAL);
ast.setValue(3.2);
assignment.setMath(ast);
assignment = model.createInitialAssignment();
assignment.setSymbol("z");
ast = ASTNode(AST_REAL);
ast.setValue(5.7);
assignment.setMath(ast);
assignmentPlugin = assignment.getPlugin("arrays");
dim = assignmentPlugin.createDimension();
dim.setId("i");
dim.setSize("m");
index = assignmentPlugin.createIndex();
newAst = NewASTNode(AST_ARRAYS_FUNCTION_SELECTOR);
ci1 = NewASTNode(AST_NAME);
ci1.setName("z");
newAst.addChild(ci1);
ci2 = NewASTNode(AST_NAME);
ci2.setName("i");
newAst.addChild(ci2);
index.setMath(newAst);
assignment = model.createInitialAssignment();
assignment.setSymbol("z");
ast = ASTNode(AST_REAL);
ast.setValue(3.2);
assignment.setMath(ast);
assignmentPlugin = assignment.getPlugin("arrays");
dim = assignmentPlugin.createDimension();
dim.setId("i");
dim.setSize("m");
index = assignmentPlugin.createIndex();
newAst = NewASTNode(AST_ARRAYS_FUNCTION_SELECTOR);
ci = NewASTNode(AST_NAME);
ci.setName("z");
newAst.addChild(ci);
plus = NewASTNode(AST_PLUS);
ci1 = NewASTNode(AST_NAME);
ci1.setName("i");
plus.addChild(ci1);
ci2 = NewASTNode(AST_NAME);
ci2.setName("m");
plus.addChild(ci2);
newAst.addChild(plus);
index.setMath(newAst);
writeSBMLToFile(doc, "arrays2.xml");
| bsd-3-clause | 8,932,212,683,409,533,000 | 21.631068 | 50 | 0.706656 | false |
fako/datascope | src/online_discourse/management/commands/analyse_wizenoze.py | 1 | 2367 | from operator import itemgetter
from pprint import pprint
import logging
import numpy as np
from django.core.management.base import BaseCommand
from core.models import Collective
log = logging.getLogger("datascope")
class Command(BaseCommand):
"""
    Analyse Wizenoze audience/reading-level annotations on a collective's individuals.
"""
def add_arguments(self, parser):
return
def handle(self, *args, **options):
collective = Collective.objects.get(id=11577)
reading_levels = {}
level_probability_diffs = {}
missing_audience = 0
missing_probabilities = 0
for individual in collective.individual_set.all():
if "audience" not in individual.properties:
missing_audience += 1
continue
if individual["audience_probabilities"] is None:
missing_probabilities += 1
continue
audience = individual["audience"]
if audience["level"] < 4 and "argument_score" in individual.properties:
print(audience["level"], individual["argument_score"], individual["url"])
            audience_probabilities = {
                ap["audience"]["level"]: ap["probability"]
                for ap in individual["audience_probabilities"]
            }
            level_probabilities = dict(sorted(audience_probabilities.items(), key=itemgetter(0), reverse=True))
if audience["label"] not in reading_levels:
reading_levels[audience["label"]] = 1
else:
reading_levels[audience["label"]] += 1
for level, probability in level_probabilities.items():
if level == 1:
continue
if level not in level_probability_diffs:
level_probability_diffs[level] = [level_probabilities[level-1]]
else:
level_probability_diffs[level].append(level_probabilities[level-1])
for level, diff in level_probability_diffs.items():
level_probability_diffs[level] = np.mean(diff)
print("Missing audience is {} and missing probabilities is {}, while total is {}".format(
missing_audience, missing_probabilities, collective.individual_set.count()))
pprint(reading_levels)
pprint(level_probability_diffs) | gpl-3.0 | -7,041,660,738,028,607,000 | 38.466667 | 111 | 0.60921 | false |
SoCdesign/EHA | Tools/Minimization_Tool/find_problematic_checkers.py | 1 | 1772 | # copyright 2016 Siavoosh Payandeh Azad and Behrad Niazmand
import package_file
from area_coverage_calc import calculate_area, calculate_coverage
from file_generator import generate_specific_file
import copy
from for_testing import gen_dummy_dict
def find_problematic_checkers():
if package_file.test_mode:
package_file.area_coverage_results = copy.deepcopy(gen_dummy_dict())
print "----------------------------------"
print "Starting checks for problematic checkers..."
problematic_checker_detected = False
checked_checkers = []
counter = 0
for item_1 in range(1, package_file.number_of_checkers+1):
print "starting calculation for item", item_1
initial_list = [str(item_1)]
generate_specific_file(initial_list)
calculate_area(initial_list)
initial_coverage = calculate_coverage(initial_list)
for item_2 in range(1, package_file.number_of_checkers+1):
if item_1 != item_2:
if (item_1, item_2) not in checked_checkers:
counter += 1
new_list = [str(item_1), str(item_2)]
print "checking round:", counter
checked_checkers.append((item_1, item_2))
generate_specific_file(new_list)
calculate_area(new_list)
new_coverage = calculate_coverage(new_list)
if new_coverage == initial_coverage:
print "\033[91m* NOTE::\033[0m PROBLEMATIC SET OF CHECKERS DETECTED:", \
item_2, " IS DOMINATED BY", item_1
problematic_checker_detected = True
return problematic_checker_detected
| gpl-3.0 | -6,955,556,709,692,845,000 | 44.435897 | 100 | 0.580135 | false |
ktmud/david | david/core/accounts/__init__.py | 1 | 1743 | # coding: utf-8
from flask.ext.security import UserMixin, RoleMixin
from config import BABEL_DEFAULT_LOCALE, BABEL_DEFAULT_TIMEZONE
from david.core.mapper import add_kind
from david.core.db import db, UidMixin
from david.lib.mixins.props import PropsMixin, PropsItem
from flask.ext.security import SQLAlchemyUserDatastore
# Define models
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
desc = db.Column(db.String(255))
K_USER = 100
class User(db.Model, UserMixin, UidMixin, PropsMixin):
kind = K_USER
kins_name = 'user'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
last_login_at = PropsItem('ll')
current_login_at = PropsItem('cl')
last_login_ip = PropsItem('llip')
current_login_ip = PropsItem('clip')
login_count = PropsItem('lc')
name = PropsItem('name')
locale = PropsItem('locale', BABEL_DEFAULT_LOCALE)
timezone = PropsItem('timezone', BABEL_DEFAULT_TIMEZONE)
@property
def display_name(self):
return self.name or (self.uid if self.uid else self.email.split('@')[0])
Account = User
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
add_kind(K_USER, User)
| mit | 3,447,524,261,894,055,000 | 30.690909 | 80 | 0.667814 | false |
JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osgscalarbar.py | 1 | 5130 | #!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgscalarbar"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgSim
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgscalarbar.cpp'
# OpenSceneGraph example, osgscalarbar.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osg/Geode>
#include <osg/ShapeDrawable>
#include <osg/Material>
#include <osg/Texture2D>
#include <osg/MatrixTransform>
#include <osg/PositionAttitudeTransform>
#include <osg/BlendFunc>
#include <osg/ClearNode>
#include <osg/Projection>
#include <osgUtil/CullVisitor>
#include <osgGA/TrackballManipulator>
#include <osgViewer/Viewer>
#include <osgDB/ReadFile>
#include <osgSim/ScalarsToColors>
#include <osgSim/ColorRange>
#include <osgSim/ScalarBar>
#include <sstream>
#include <iostream>
#include <math.h>
# "using namespace osgSim" has no direct Python equivalent; bind the name instead.
ScalarBar = osgSim.ScalarBar
#if defined(_MSC_VER)
# Have to have this pathway just for VS6.0, as it's unable to handle the full
# ScalarBar.ScalarPrinter.printScalar scoping.
# Create a custom scalar printer
class MyScalarPrinter(ScalarBar.ScalarPrinter):
    def printScalar(self, scalar):
        print "In MyScalarPrinter.printScalar"
        if scalar == 0.0: return ScalarBar.ScalarPrinter.printScalar(self, scalar) + " Bottom"
        elif scalar == 0.5: return ScalarBar.ScalarPrinter.printScalar(self, scalar) + " Middle"
        elif scalar == 1.0: return ScalarBar.ScalarPrinter.printScalar(self, scalar) + " Top"
        else: return ScalarBar.ScalarPrinter.printScalar(self, scalar)
#else:
# Create a custom scalar printer
class MyScalarPrinter(ScalarBar.ScalarPrinter):
    def printScalar(self, scalar):
        print "In MyScalarPrinter.printScalar"
        if scalar == 0.0: return ScalarBar.ScalarPrinter.printScalar(self, scalar) + " Bottom"
        elif scalar == 0.5: return ScalarBar.ScalarPrinter.printScalar(self, scalar) + " Middle"
        elif scalar == 1.0: return ScalarBar.ScalarPrinter.printScalar(self, scalar) + " Top"
        else: return ScalarBar.ScalarPrinter.printScalar(self, scalar)
#endif
def createScalarBar():
#if 1
#ScalarsToColors* stc = ScalarsToColors(0.0,1.0)
#ScalarBar* sb = ScalarBar(2,3,stc,"STC_ScalarBar")
# Create a custom color set
    cs = []
    cs.append(osg.Vec4(1.0,0.0,0.0,1.0)) # R
    cs.append(osg.Vec4(0.0,1.0,0.0,1.0)) # G
    cs.append(osg.Vec4(1.0,1.0,0.0,1.0)) # Y
    cs.append(osg.Vec4(0.0,0.0,1.0,1.0)) # B
    cs.append(osg.Vec4(0.0,1.0,1.0,1.0)) # C
cr = ColorRange(0.0,1.0,cs)
    sb = ScalarBar(20, 11, cr, "ScalarBar", ScalarBar.VERTICAL, 0.1, MyScalarPrinter())
    sb.setScalarPrinter(MyScalarPrinter())
return sb
#else:
sb = ScalarBar()
tp = ScalarBar.TextProperties()
tp._fontFile = "fonts/times.ttf"
sb.setTextProperties(tp)
return sb
#endif
def createScalarBar_HUD():
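    # Render the scalar bar as a screen-space HUD: lighting and depth test off,
    # drawn in a late render bin, positioned by an absolute modelview matrix
    # under an orthographic projection.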
geode = osgSim.ScalarBar()
tp = osgSim.ScalarBar.TextProperties()
tp._fontFile = "fonts/times.ttf"
geode.setTextProperties(tp)
stateset = geode.getOrCreateStateSet()
stateset.setMode(GL_LIGHTING, osg.StateAttribute.OFF)
stateset.setMode(GL_DEPTH_TEST,osg.StateAttribute.OFF)
stateset.setRenderBinDetails(11, "RenderBin")
modelview = osg.MatrixTransform()
modelview.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
matrix = osg.Matrixd(osg.Matrixd.scale(1000,1000,1000) * osg.Matrixd.translate(120,10,0)) # I've played with these values a lot and it seems to work, but I have no idea why
modelview.setMatrix(matrix)
modelview.addChild(geode)
projection = osg.Projection()
projection.setMatrix(osg.Matrix.ortho2D(0,1280,0,1024)) # or whatever the OSG window res is
projection.addChild(modelview)
return projection #make sure you delete the return sb line
def main(argv):
# construct the viewer.
viewer = osgViewer.Viewer()
group = osg.Group()
group.addChild(createScalarBar())
group.addChild(createScalarBar_HUD())
# add model to viewer.
viewer.setSceneData( group )
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | -8,399,547,675,412,330,000 | 30.472393 | 176 | 0.719688 | false |
koss822/misc | Linux/MySettings/myvim/vim/bundle/jedi-vim/pythonx/jedi/test/test_api/test_full_name.py | 1 | 3261 | """
Tests for :attr:`.BaseDefinition.full_name`.
There are three kinds of test:
#. Test classes derived from :class:`MixinTestFullName`.
Child class defines :attr:`.operation` to alter how
the api definition instance is created.
#. :class:`TestFullDefinedName` is to test combination of
``obj.full_name`` and ``jedi.defined_names``.
#. Misc single-function tests.
"""
import textwrap
import pytest
import jedi
from ..helpers import TestCase, cwd_at
class MixinTestFullName(object):
operation = None
@pytest.fixture(autouse=True)
def init(self, Script, environment):
self.Script = Script
self.environment = environment
def check(self, source, desired):
script = self.Script(textwrap.dedent(source))
definitions = getattr(script, type(self).operation)()
for d in definitions:
self.assertEqual(d.full_name, desired)
def test_os_path_join(self):
self.check('import os; os.path.join', 'os.path.join')
def test_builtin(self):
self.check('TypeError', 'TypeError')
class TestFullNameWithGotoDefinitions(MixinTestFullName, TestCase):
operation = 'goto_definitions'
def test_tuple_mapping(self):
if self.environment.version_info.major == 2:
pytest.skip('Python 2 also yields None.')
self.check("""
import re
any_re = re.compile('.*')
any_re""", '_sre.SRE_Pattern')
def test_from_import(self):
self.check('from os import path', 'os.path')
class TestFullNameWithCompletions(MixinTestFullName, TestCase):
operation = 'completions'
class TestFullDefinedName(TestCase):
"""
Test combination of ``obj.full_name`` and ``jedi.defined_names``.
"""
@pytest.fixture(autouse=True)
def init(self, environment):
self.environment = environment
def check(self, source, desired):
definitions = jedi.names(textwrap.dedent(source), environment=self.environment)
full_names = [d.full_name for d in definitions]
self.assertEqual(full_names, desired)
def test_local_names(self):
self.check("""
def f(): pass
class C: pass
""", ['__main__.f', '__main__.C'])
def test_imports(self):
self.check("""
import os
from os import path
from os.path import join
from os import path as opath
""", ['os', 'os.path', 'os.path.join', 'os.path'])
def test_sub_module(Script, jedi_path):
"""
``full_name needs to check sys.path to actually find it's real path module
path.
"""
sys_path = [jedi_path]
defs = Script('from jedi.api import classes; classes', sys_path=sys_path).goto_definitions()
assert [d.full_name for d in defs] == ['jedi.api.classes']
defs = Script('import jedi.api; jedi.api', sys_path=sys_path).goto_definitions()
assert [d.full_name for d in defs] == ['jedi.api']
def test_os_path(Script):
d, = Script('from os.path import join').completions()
assert d.full_name == 'os.path.join'
d, = Script('import os.p').completions()
assert d.full_name == 'os.path'
def test_os_issues(Script):
"""Issue #873"""
c, = Script('import os\nos.nt''').completions()
assert c.full_name == 'nt'
| gpl-3.0 | 3,072,933,680,588,718,600 | 27.356522 | 96 | 0.636615 | false |
anpingli/openshift-ansible | roles/lib_openshift/library/oc_process.py | 1 | 58519 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/process -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_process
short_description: Module to process openshift templates
description:
- Process openshift templates programmatically.
options:
state:
description:
- State has a few different meanings when it comes to process.
- state: present - This state runs an `oc process <template>`. When used in
- conjunction with 'create: True' the process will be piped to | oc create -f
- state: absent - will remove a template
- state: list - will perform an `oc get template <template_name>`
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
template_name:
description:
- Name of the openshift template that is being processed.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the template lives.
required: false
default: default
aliases: []
content:
description:
- Template content that will be processed.
required: false
default: None
aliases: []
params:
description:
- A list of parameters that will be inserted into the template.
required: false
default: None
aliases: []
create:
description:
- Whether or not to create the template after being processed. e.g. oc process | oc create -f -
required: False
default: False
aliases: []
reconcile:
description:
- Whether or not to attempt to determine if there are updates or changes in the incoming template.
default: true
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: process the cloud volume provisioner template with variables
oc_process:
namespace: openshift-infra
template_name: online-volume-provisioner
create: True
params:
PLAT: rhel7
register: processout
run_once: true
- debug: var=processout
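
# A second, minimal sketch reusing the same (illustrative) template and
# namespace: query the already-loaded template without creating objects.
- name: list the online-volume-provisioner template
  oc_process:
    namespace: openshift-infra
    template_name: online-volume-provisioner
    state: list
  register: templatelist

- debug: var=templatelist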
'''
# -*- -*- -*- End included fragment: doc/process -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
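    # re_valid_key validates a whole dotted/bracketed path (e.g. a.b[0].c);
    # re_key is used with findall to split it into (array-index, dict-key) pairs.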
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, '{}.{}'.format(self.filename, time.strftime("%Y%m%dT%H%M%S")))
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
try:
# AUDIT:maybe-no-member makes sense due to different yaml libraries
# pylint: disable=maybe-no-member
curr_value = yaml.safe_load(invalue, Loader=yaml.RoundTripLoader)
except AttributeError:
curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-p')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_process.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCProcess(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5. we need 6
# pylint: disable=too-many-arguments
def __init__(self,
namespace,
tname=None,
params=None,
create=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
tdata=None,
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCProcess, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = tname
self.data = tdata
self.params = params
self.create = create
self._template = None
@property
def template(self):
'''template property'''
if self._template is None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
raise OpenShiftCLIError('Error processing template [%s]: %s' %(self.name, results))
self._template = results['results']['items']
return self._template
def get(self):
'''get the template'''
results = self._get('template', self.name)
if results['returncode'] != 0:
# Does the template exist??
if 'not found' in results['stderr']:
results['returncode'] = 0
results['exists'] = False
results['results'] = []
return results
def delete(self, obj):
'''delete a resource'''
return self._delete(obj['kind'], obj['metadata']['name'])
def create_obj(self, obj):
'''create a resource'''
return self._create_from_content(obj['metadata']['name'], obj)
def process(self, create=None):
'''process a template'''
do_create = False
        if create is not None:
do_create = create
else:
do_create = self.create
return self._process(self.name, do_create, self.params, self.data)
def exists(self):
'''return whether the template exists'''
# Always return true if we're being passed template data
if self.data:
return True
t_results = self._get('template', self.name)
if t_results['returncode'] != 0:
# Does the template exist??
if 'not found' in t_results['stderr']:
return False
else:
raise OpenShiftCLIError('Something went wrong. %s' % t_results)
return True
def needs_update(self):
'''attempt to process the template and return it for comparison with oc objects'''
obj_results = []
for obj in self.template:
# build a list of types to skip
skip = []
if obj['kind'] == 'ServiceAccount':
skip.extend(['secrets', 'imagePullSecrets'])
if obj['kind'] == 'BuildConfig':
skip.extend(['lastTriggeredImageID'])
if obj['kind'] == 'ImageStream':
skip.extend(['generation'])
if obj['kind'] == 'DeploymentConfig':
skip.extend(['lastTriggeredImage'])
# fetch the current object
curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
if curr_obj_results['returncode'] != 0:
# Does the template exist??
if 'not found' in curr_obj_results['stderr']:
obj_results.append((obj, True))
continue
# check the generated object against the existing object
if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
obj_results.append((obj, True))
continue
obj_results.append((obj, False))
return obj_results
# pylint: disable=too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocprocess = OCProcess(params['namespace'],
params['template_name'],
params['params'],
params['create'],
kubeconfig=params['kubeconfig'],
tdata=params['content'],
verbose=params['debug'])
state = params['state']
api_rval = ocprocess.get()
if state == 'list':
if api_rval['returncode'] != 0:
return {"failed": True, "msg" : api_rval}
return {"changed" : False, "results": api_rval, "state": state}
elif state == 'present':
if check_mode and params['create']:
return {"changed": True, 'msg': "CHECK_MODE: Would have processed template."}
if not ocprocess.exists() or not params['reconcile']:
#FIXME: this code will never get run in a way that succeeds when
# module.params['reconcile'] is true. Because oc_process doesn't
# create the actual template, the check of ocprocess.exists()
# is meaningless. Either it's already here and this code
# won't be run, or this code will fail because there is no
# template available for oc process to use. Have we conflated
# the template's existence with the existence of the objects
# it describes?
# Create it here
api_rval = ocprocess.process()
if api_rval['returncode'] != 0:
return {"failed": True, "msg": api_rval}
if params['create']:
return {"changed": True, "results": api_rval, "state": state}
return {"changed": False, "results": api_rval, "state": state}
# verify results
update = False
rval = []
all_results = ocprocess.needs_update()
for obj, status in all_results:
if status:
ocprocess.delete(obj)
results = ocprocess.create_obj(obj)
results['kind'] = obj['kind']
rval.append(results)
update = True
if not update:
return {"changed": update, "results": api_rval, "state": state}
for cmd in rval:
if cmd['returncode'] != 0:
return {"failed": True, "changed": update, "msg": rval, "state": state}
return {"changed": update, "results": rval, "state": state}
# -*- -*- -*- End included fragment: class/oc_process.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_process.py -*- -*- -*-
def main():
'''
ansible oc module for processing templates
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str', choices=['present', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
template_name=dict(default=None, type='str'),
content=dict(default=None, type='str'),
params=dict(default=None, type='dict'),
create=dict(default=False, type='bool'),
reconcile=dict(default=True, type='bool'),
),
supports_check_mode=True,
)
rval = OCProcess.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_process.py -*- -*- -*-
| apache-2.0 | 5,728,075,993,258,235,000 | 33.60615 | 118 | 0.533929 | false |
fusionbox/buggy | buggy/views.py | 1 | 10158 | import json
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, FormView, View
from django.http import Http404, HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect
from django.db.models import Prefetch
from django.db import transaction
from django.utils.functional import cached_property
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.conf import settings
from django.template.defaultfilters import capfirst, pluralize
from .models import Bug, Action, Comment
from .forms import FilterForm, PresetFilterForm
from .mutation import BuggyBugMutator
from .enums import State, Priority
from . import webhook
User = get_user_model()
class BugListView(LoginRequiredMixin, ListView):
ORDER_FIELDS = {
'number': 'id',
'project': 'project__name',
'bug': 'title',
'modified': 'modified_at',
'creator': 'created_by__name',
'assigned_to': 'assigned_to__name',
'state': 'state',
'priority': 'priority',
}
mutator_class = BuggyBugMutator
queryset = Bug.objects.select_related(
'project', 'created_by', 'assigned_to'
).order_by(
'-modified_at'
).defer('fulltext') # We don't use the column, so there's no need to detoast a long string.
context_object_name = 'bugs'
form_class = FilterForm
def get_form_kwargs(self):
return {
'data': self.request.GET,
'label_suffix': '',
}
def get_form(self):
return self.form_class(**self.get_form_kwargs())
def get_bulk_action_form_kwargs(self):
kwargs = {
'queryset': self.object_list,
'bug_actions': self.get_bug_actions(),
}
if self.request.POST:
kwargs['data'] = self.request.POST
return kwargs
def get_bulk_action_form(self):
form_class = self.mutator_class.get_bulk_action_form_class()
return form_class(**self.get_bulk_action_form_kwargs())
def get(self, *args, **kwargs):
self.form = self.get_form()
return super().get(*args, **kwargs)
def post(self, *args, **kwargs):
self.form = self.get_form()
self.object_list = self.get_queryset()
bulk_action_form = self.get_bulk_action_form()
errors = None
if bulk_action_form.is_valid():
try:
with transaction.atomic():
for bug in bulk_action_form.cleaned_data['bugs']:
state_machine = self.mutator_class(self.request.user, bug)
state_machine.process_action(bulk_action_form.cleaned_data)
except ValidationError as e:
errors = e
else:
errors = sum(bulk_action_form.errors.values(), [])
if errors:
for error in errors:
messages.error(self.request, 'Bulk Action Failed: {}'.format(error))
else:
bug_count = len(bulk_action_form.cleaned_data['bugs'])
messages.success(
self.request,
'Success: {} bug{} updated'.format(bug_count, pluralize(bug_count)),
)
return HttpResponseRedirect(self.request.get_full_path())
def get_bug_actions(self):
bug_actions = {}
for bug in self.object_list:
mutator = self.mutator_class(self.request.user, bug)
action_choices = mutator.action_choices(mutator.get_actions())
bug_actions[bug.number] = [x[0] for x in action_choices]
return bug_actions
def get_sort_links(self):
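        """Build one querystring per sortable column, toggling the descending
        flag for the column that is currently active."""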
sort_links = {}
querydict = self.request.GET.copy()
if '_pjax' in querydict:
del querydict['_pjax'] # pjax adds this param for cache purposes.
current_sort, desc = self.sort_type()
for order_field in self.ORDER_FIELDS.keys():
if 'desc' in querydict:
del querydict['desc']
if current_sort == order_field and not desc:
querydict['desc'] = True
querydict['sort'] = order_field
sort_links[order_field] = '?{}'.format(querydict.urlencode())
return sort_links
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if 'bulk_action_form' not in kwargs:
context['bulk_action_form'] = self.get_bulk_action_form()
context['form'] = self.form
context['bulk_actions'] = self.mutator_class.get_bulk_actions()
context['preset_form'] = PresetFilterForm(label_suffix='')
context['sort_links'] = self.get_sort_links()
context['sort_by'], context['sort_desc'] = self.sort_type()
return context
def get_queryset(self):
qs = super().get_queryset()
if self.form.is_valid():
qs = self.form.filter(qs)
order_field, desc = self.sort_type()
return qs.order_by(('-' if desc else '') + self.ORDER_FIELDS[order_field])
else:
return qs.none()
def get_template_names(self):
if self.request.META.get('HTTP_X_PJAX'):
return ['buggy/_bug_list.html']
else:
return super().get_template_names()
def sort_type(self):
order_field = self.request.GET.get('sort')
if order_field not in self.ORDER_FIELDS:
return ('modified', True)
else:
return (order_field, bool(self.request.GET.get('desc')))
class BugMutationMixin(LoginRequiredMixin):
mutator_class = BuggyBugMutator
@cached_property
def state_machine(self):
return self.mutator_class(self.request.user, self.object)
def get_form_class(self):
return self.state_machine.get_form_class()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['actions'] = self.state_machine.get_actions()
context['buggy_user_names'] = [
user.get_short_name().lower() for user in User.objects.filter(is_active=True)
]
context['buggy_open_bugs'] = [
{
'title': bug.title,
'number': bug.number,
} for bug in Bug.objects.exclude(state=State.CLOSED).defer('fulltext')
]
return context
def form_valid(self, form):
try:
action = self.state_machine.process_action(form.cleaned_data)
except ValidationError as e:
for error in e.error_list:
                form.add_error(None, error)
return self.form_invalid(form)
else:
messages.success(self.request, capfirst(action.description))
return HttpResponseRedirect(action.bug.get_absolute_url())
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['label_suffix'] = ''
return kwargs
class BugDetailView(BugMutationMixin, FormView):
template_name = 'buggy/bug_detail.html'
queryset = Bug.objects.select_related(
'created_by', 'assigned_to', 'project'
)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
@transaction.atomic
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super().post(request, *args, **kwargs)
def get_object(self):
try:
if self.request.method == 'POST':
# We'd like to just use select_for_update on the main queryset,
# but the select_related does a left join. Postgres does not
# support locking the outer side of an outer join. The SQL we
# want is `SELECT ... FOR UPDATE OF buggy_bug`, which would
# only lock the one table, but Django can't yet generate that
# SQL: <https://code.djangoproject.com/ticket/28010>.
# BBB: This extra query can be replaced with
# select_for_update(of=('self',)) as soon as it's supported in
# Django.
Bug.objects.all().select_for_update().get_by_number(self.kwargs['bug_number'])
return self.queryset.get_by_number(self.kwargs['bug_number'])
except Bug.DoesNotExist as e:
raise Http404(*e.args)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['bug'] = self.object
return context
def get_initial(self):
return {
'title': self.object.title,
'priority': self.object.priority.value,
}
class BugCreateView(BugMutationMixin, FormView):
template_name = 'buggy/bug_create.html'
object = None
class AddPresetView(LoginRequiredMixin, View):
def post(self, request):
data = request.POST.copy()
data['user'] = request.user.id
form = PresetFilterForm(data)
if form.is_valid():
form.save()
else:
messages.error(request, 'Preset names must be unique.')
return redirect('buggy:bug_list')
class RemovePresetView(LoginRequiredMixin, View):
def post(self, request, pk):
request.user.presetfilter_set.filter(pk=pk).delete()
return redirect('buggy:bug_list')
class MarkdownPreviewView(LoginRequiredMixin, View):
def post(self, request):
return HttpResponse(Comment(comment=request.POST.get('preview', '')).html)
class GitCommitWebhookView(View):
def post(self, request):
if settings.GIT_COMMIT_WEBHOOK_SECRET is None or webhook.validate_signature(
settings.GIT_COMMIT_WEBHOOK_SECRET,
request.body,
request.META['HTTP_X_HUB_SIGNATURE'],
):
data = json.loads(request.body.decode('utf-8'))
for commit in data['commits']:
webhook.process_commit(commit)
return HttpResponse('', status=201)
else:
return HttpResponseForbidden('Signature does not match.')
| bsd-3-clause | 3,617,712,142,736,727,600 | 34.642105 | 96 | 0.60189 | false |
hughperkins/pub-prototyping | py/change_indent.py | 1 | 5169 | #!/usr/bin/python
"""
Copyright Hugh Perkins 2016
You can use this under the BSDv2 license
This script re-indents files, without changing git blame. It will create
a new commit for each author present in the original blame, with commit message
'automated re-indentation'
"""
import sys
import os
from os import path
import subprocess
filename=sys.argv[1]
print(subprocess.check_output([
'git', 'checkout', filename
]))
out = subprocess.check_output([
'git', 'blame', '--line-porcelain', filename
])
print('out', out)
author_info_by_email = {}
lines_by_author = {}
def process_line_info(line_info):
print(line_info)
author_email = line_info['author-mail']
if author_email not in author_info_by_email:
author_info = {}
author_info['email'] = author_email
author_info['name'] = line_info['author']
author_info_by_email[author_email] = author_info
line_num = line_info['line_num']
if author_email not in lines_by_author:
lines_by_author[author_email] = []
lines_by_author[author_email].append(line_num)
line_num = 0 # 1-based, otherwise inconsistent with all of: lua, text editors, and git blame output
line_info = {}
in_boundary = False
boundary_line = -1
for line in out.split('\n'):
key = line.split(' ')[0]
if len(key) > 39:
if len(line_info.keys()) > 0:
process_line_info(line_info)
in_boundary = False
line_num = line_num + 1
line_info = {}
line_info['line_num'] = line_num
continue
if in_boundary:
if boundary_line == 2:
line_info['contents'] = line.rstrip()[1:]
boundary_line = boundary_line + 1
else:
if key == 'boundary':
in_boundary = True
boundary_line = 1
else:
if key is not None and key != '' and len(key) < 40:
value = line.strip().replace(key + ' ', '')
if value.strip() != '':
if key in ['author', 'author-mail', 'summary']:
line_info[key] = value
if len(line_info.keys()) > 0:
process_line_info(line_info)
print(lines_by_author)
def reindent(filepath, lines, indentsize=2):
f = open(filepath, 'r')
contents = f.read()
f.close()
f = open(filepath, 'w')
    indent = 0
nextindent = 0
line_num = 1
last_line = None
in_code_block = False
block_indent = 0
next_block_indent = 0
for line in contents.split('\n'):
original_line = line
line = line.strip()
prefix = ''
if not in_code_block:
comment_pos = line.find('--')
if comment_pos >= 0:
pc = line[:comment_pos]
comments = line[comment_pos:]
else:
pc = line
comments = ''
if '[[' in pc:
codeblock_pos = pc.find('[[')
                comments = pc[codeblock_pos:]
                pc = pc[:codeblock_pos]
in_code_block = True
block_indent = 0
next_block_indent = 1
if in_code_block:
if ']]' in line:
codeblock_end = line.find(']]') + 2
prefix = line[:codeblock_end]
pc = line[codeblock_end:]
in_code_block = False
comments = ''
else:
pc = ''
comments = line
if(comments.startswith('if') or comments.startswith('for ') or comments.startswith('while') or comments.startswith('function')
or comments.startswith('local function') or comments.find(' = function(') >= 0):
next_block_indent += 1
elif comments.startswith('elseif') or comments.startswith('else'):
block_indent -= 1
if comments.startswith('end') or comments.endswith('end'):
block_indent -= 1
indent += block_indent
block_indent = next_block_indent
pcs = pc.strip()
if(pcs.startswith('if') or pcs.endswith(' do') or pcs == 'do' or pcs.startswith('function')
or pcs.startswith('local function') or pcs.find(' function(') >= 0 or pcs.find('=function(') >= 0):
nextindent += 1
elif pcs.startswith('elseif') or pcs.startswith('else'):
indent -= 1
if pcs.startswith('end') or pcs.endswith('end'):
indent -= 1
nextindent -= 1
# handle brackets...
excess_brackets = pc.count('(') + pc.count('{') - pc.count(')') - pc.count('}')
nextindent += excess_brackets
if excess_brackets < 0 and (pc[0] == ')' or pc[0] == '}'):
indent = nextindent
if line_num in lines:
f.write(' ' * (indentsize * indent) + prefix + pc + comments + '\n')
else:
f.write(original_line + '\n')
indent = nextindent
last_line = line
line_num = line_num + 1
if last_line != '':
f.write('\n')
f.close()
for author_email in lines_by_author:
author_info = author_info_by_email[author_email]
print(author_info)
print(subprocess.check_output([
'git', 'config', '--local', '--add', 'user.name', author_info['name']
]))
print(subprocess.check_output([
'git', 'config', '--local', '--add', 'user.email', author_email
]))
print(subprocess.check_output([
'git', 'config', '--local', '-l'
]))
reindent(filename, lines_by_author[author_email])
print(subprocess.check_output([
'git', 'add', filename
]))
print(subprocess.check_output([
'git', 'commit', '-m', 'automated re-indentation of ' + filename
]))
| apache-2.0 | 4,955,187,367,232,464,000 | 28.706897 | 134 | 0.60089 | false |
myarjunar/QGIS | tests/src/python/test_provider_mssql.py | 1 | 3347 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the MS SQL provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2015-12-07'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.core import QgsSettings, QgsVectorLayer, QgsFeatureRequest
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant
from utilities import unitTestDataPath
from qgis.testing import start_app, unittest
from providertestbase import ProviderTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsMssqlProvider(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.dbconn = "dbname='gis' host=localhost\sqlexpress"
if 'QGIS_MSSQLTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_MSSQLTEST_DB']
# Create test layers
cls.vl = QgsVectorLayer(
cls.dbconn + ' sslmode=disable key=\'pk\' srid=4326 type=POINT table="qgis_test"."someData" (geom) sql=', 'test', 'mssql')
assert(cls.vl.isValid())
cls.provider = cls.vl.dataProvider()
cls.poly_vl = QgsVectorLayer(
cls.dbconn + ' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data" (geom) sql=', 'test', 'mssql')
assert(cls.poly_vl.isValid())
cls.poly_provider = cls.poly_vl.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def enableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', True)
def disableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', False)
# HERE GO THE PROVIDER SPECIFIC TESTS
def testDateTimeTypes(self):
vl = QgsVectorLayer('%s table="qgis_test"."date_times" sql=' %
(self.dbconn), "testdatetimes", "mssql")
assert(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName(
'date_field')).type(), QVariant.Date)
self.assertEqual(fields.at(fields.indexFromName(
'time_field')).type(), QVariant.Time)
self.assertEqual(fields.at(fields.indexFromName(
'datetime_field')).type(), QVariant.DateTime)
f = next(vl.getFeatures(QgsFeatureRequest()))
date_idx = vl.fields().lookupField('date_field')
assert isinstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2004, 3, 4))
time_idx = vl.fields().lookupField('time_field')
assert isinstance(f.attributes()[time_idx], QTime)
self.assertEqual(f.attributes()[time_idx], QTime(13, 41, 52))
datetime_idx = vl.fields().lookupField('datetime_field')
assert isinstance(f.attributes()[datetime_idx], QDateTime)
self.assertEqual(f.attributes()[datetime_idx], QDateTime(
QDate(2004, 3, 4), QTime(13, 41, 52)))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -6,501,090,033,941,872,000 | 37.034091 | 142 | 0.655214 | false |
tcp813/mouTools | qt/table_editor.py | 1 | 7125 | """
PyQt5.6 python3.4
A QTableView whose cells are edited in place through a custom delegate (QPlainTextEdit editor)
"""
import random
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
txt = '''The ASCII character set is suitable for encoding English-language documents,but it does not have much in the way of special characters, such as the French ‘ç.’
It is wholly unsuited for encoding documents in languages such as Greek, Russian, and
Chinese. Over the years, a variety of methods have been developed to encode text for
different languages. The Unicode Consortium has devised the most comprehensive and widely
accepted standard for encoding text. The current Unicode standard (version 5.0) has a repertoire
of nearly 100,000 characters supporting languages ranging from Albanian to Xamtanga
(a language spoken by the Xamir people of Ethiopia).
# '''
# txt = '''
# The ASCII character set is suitable for encoding English-language documents, but it does not have much in the way of special characters, such as the French ‘ç.’ It is wholly unsuited for encoding documents in languages such as Greek, Russian, and Chinese. Over the years, a variety of methods have been developed to encode text for different languages. The Unicode Consortium has devised the most comprehensive and widely accepted standard for encoding text. The current Unicode standard (version 5.0) has a repertoire of nearly 100,000 characters supporting languages ranging from Albanian to Xamtanga (a language spoken by the Xamir people of Ethiopia).
# '''
class Note:
HIGHTLIGHT = 0
NOTE = 1
def __init__(self):
self.content = ''
self.date = '2017.7.21 11:11:11'
self.kind = random.sample([Note.HIGHTLIGHT, Note.NOTE], 1)[0]
def genDatas():
notes = []
for i in range(50):
note = Note()
note.content = txt[:random.randint(100, len(txt)-1)]
notes.append(note)
return notes
"""
Delegate
"""
class Delegate(QStyledItemDelegate):
def __init__(self, notes, parent=None):
QStyledItemDelegate.__init__(self, parent)
self.parent = parent
self.notes = notes
def paint(self, painter, option, index):
QStyledItemDelegate.paint(self, painter, option, index)
if index.column() == 1:
txt = self.notes[index.row()].date
txtRect = QRect(option.rect.x(), option.rect.y()+option.rect.height()-50, option.rect.width(), 50)
painter.setPen(QColor('#666666'))
painter.drawText(txtRect, Qt.AlignLeft | Qt.AlignTop | Qt.TextSingleLine, txt)
elif index.column() in [0, 2]:
painter.fillRect(option.rect, QColor('#FFFFFF'))
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect.x(), option.rect.y(), option.rect.width(), option.rect.height()-50)
# editor.setGeometry(option.rect)
def createEditor(self, parent, option, index):
# editor = QTextEdit(parent)
editor = QPlainTextEdit(parent)
return editor
def setModelData(self, editor, model, index):
model.setData(index, editor.toPlainText(), Qt.DisplayRole)
"""
Model
"""
class Model(QAbstractTableModel):
def __init__(self, notes, parent=None):
QAbstractTableModel.__init__(self, parent)
self.parent = parent
self.notes = notes
def rowCount(self, index):
return len(self.notes)
def columnCount(self, index):
return 3
def setData(self, index, value, role):
if index.column() == 1:
# if role == Qt.EditRole:
self.notes[index.row()].content = value
self.dataChanged.emit(index, index)
return True
return False
def data(self, index, role):
if role == Qt.DisplayRole:
if index.column() == 0:
return index.row()+1
elif index.column() == 1:
return self.notes[index.row()].content
elif role == Qt.EditRole:
if index.column() == 1:
return self.notes[index.row()].content
elif role == Qt.TextAlignmentRole:
if index.column() == 1:
return Qt.AlignTop | Qt.AlignLeft
# if role == Qt.BackgroundRole:
# if index.column() == 1:
# if self.notes[index.row()].kind is Note.HIGHTLIGHT:
# return QColor('#0000FF')
# elif self.notes[index.row()].kind is Note.NOTE:
# return QColor('#00FF00')
# elif role == Qt.SizeHintRole:
# return QSize(50, 100)
def flags(self, index):
if index.isValid():
if index.column() == 1:
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
return Qt.ItemIsEnabled
return Qt.NoItemFlags
"""
View
"""
class View(QTableView):
def __init__(self, parent=None):
QTableView.__init__(self, parent)
self.verticalHeader().setVisible(False)
self.horizontalHeader().setVisible(False)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setFocusPolicy(Qt.NoFocus)
# self.setWordWrap(False)
# self.setTextElideMode(Qt.ElideMiddle)
self.setShowGrid(False)
# def enterEvent(self, event):
# self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
#
# def leaveEvent(self, event):
# self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
class Widget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.resize(800, 600)
self.notes = genDatas()
self.view = View(self)
self.model = Model(self.notes, self)
self.delegate = Delegate(self.notes, self)
self.view.setModel(self.model)
self.view.setItemDelegate(self.delegate)
self.view.setColumnWidth(0, 70)
self.view.setColumnWidth(2, 100)
# self.view.setColumnWidth(1, 200)
# self.view.setColumnHidden(2, True)
self.view.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)
self.view.resizeRowsToContents()
self.view.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.view.setStyleSheet('''
QTableView::item {
margin-bottom: 50px;
border: 1px solid #D9EAFA;
}
QTableView {
selection-color: #FFFFFF;
}
QTableView::item {
border-radius: 10px;
background-color: #D9EAFA;
}
QTableView::item:selected:!active {
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ABAFE5, stop: 1 #8588B2);
color: #FFFFFF;
border-radius: 10px;
}
QTableView::item:hover {
/*background: #FF0000;*/
border: 1px solid #FF0000;
}
''')
self.layout = QHBoxLayout(self)
self.layout.addWidget(self.view)
if __name__ == '__main__':
app = QApplication([])
w = Widget()
w.show()
app.exec_() | mit | 3,111,446,913,765,119,000 | 33.712195 | 657 | 0.626001 | false |
opethe1st/CompetitiveProgramming | Codility/5PrefixSums/GeonomicRange.py | 1 | 1181 | def solution(S, P, Q):
# write your code in Python 2.7
prefixA = [0] * (len(S) + 1)
prefixC = [0] * (len(S) + 1)
prefixG = [0] * (len(S) + 1)
prefixT = [0] * (len(S) + 1)
for i in xrange(len(S)):
if S[i] == 'A':
prefixA[i + 1] = prefixA[i] + 1
else:
prefixA[i + 1] = prefixA[i]
for i in xrange(len(S)):
if S[i] == 'C':
prefixC[i + 1] = prefixC[i] + 1
else:
prefixC[i + 1] = prefixC[i]
for i in xrange(len(S)):
if S[i] == 'G':
prefixG[i + 1] = prefixG[i] + 1
else:
prefixG[i + 1] = prefixG[i]
for i in xrange(len(S)):
if S[i] == 'T':
prefixT[i + 1] = prefixT[i] + 1
else:
prefixT[i + 1] = prefixT[i]
ans = []
for i in xrange(len(P)):
# print prefixC,Q[i],P[i]
if prefixA[Q[i] + 1] > prefixA[P[i]]:
ans.append(1)
elif prefixC[Q[i] + 1] > prefixC[P[i]]:
ans.append(2)
elif prefixG[Q[i] + 1] > prefixG[P[i]]:
ans.append(3)
elif prefixT[Q[i] + 1] > prefixT[P[i]]:
ans.append(4)
return ans
| gpl-3.0 | -6,070,044,671,206,912,000 | 29.282051 | 47 | 0.42591 | false |
magfest/ubersystem | alembic/versions/5ceaec4834aa_associate_mits_docs_and_pictures_with_.py | 1 | 3130 | """Associate MITS docs and pictures with games instead of teams
Revision ID: 5ceaec4834aa
Revises: 4036e1fdb9ee
Create Date: 2020-04-14 23:23:35.417496
"""
# revision identifiers, used by Alembic.
revision = '5ceaec4834aa'
down_revision = '4036e1fdb9ee'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import residue
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('mits_document', sa.Column('game_id', residue.UUID(), nullable=False))
op.drop_constraint('fk_mits_document_team_id_mits_team', 'mits_document', type_='foreignkey')
op.create_foreign_key(op.f('fk_mits_document_game_id_mits_game'), 'mits_document', 'mits_game', ['game_id'], ['id'])
op.drop_column('mits_document', 'team_id')
op.add_column('mits_picture', sa.Column('game_id', residue.UUID(), nullable=False))
op.drop_constraint('fk_mits_picture_team_id_mits_team', 'mits_picture', type_='foreignkey')
op.create_foreign_key(op.f('fk_mits_picture_game_id_mits_game'), 'mits_picture', 'mits_game', ['game_id'], ['id'])
op.drop_column('mits_picture', 'team_id')
def downgrade():
op.add_column('mits_picture', sa.Column('team_id', postgresql.UUID(), autoincrement=False, nullable=False))
op.drop_constraint(op.f('fk_mits_picture_game_id_mits_game'), 'mits_picture', type_='foreignkey')
op.create_foreign_key('fk_mits_picture_team_id_mits_team', 'mits_picture', 'mits_team', ['team_id'], ['id'])
op.drop_column('mits_picture', 'game_id')
op.add_column('mits_document', sa.Column('team_id', postgresql.UUID(), autoincrement=False, nullable=False))
op.drop_constraint(op.f('fk_mits_document_game_id_mits_game'), 'mits_document', type_='foreignkey')
op.create_foreign_key('fk_mits_document_team_id_mits_team', 'mits_document', 'mits_team', ['team_id'], ['id'])
op.drop_column('mits_document', 'game_id')
| agpl-3.0 | -4,039,767,539,974,114,300 | 41.297297 | 120 | 0.651757 | false |
Coburn37/DoxygenMediawikiBot | doxymw.py | 1 | 9187 | #A python bot sitting atop PyWikibot and Doxygen to automatically
#add doxygen docs to a wiki for your documentation pleasure
#The main goal of this is to take the power and placement of doxygen docs
#and combine it with the flexibility and remoteness of a wiki
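# A rough usage sketch (the wiki credentials and doxygen settings are assumed to
# be configured via doxymwglobal.config and pywikibot; commands and flags are
# taken from main() below):
#
#   python doxymw.py update -i      # regenerate the doxygen docs and update the wiki
#   python doxymw.py cleanup        # run the cleanup task against the wiki
#
# Optional flags include -w/--warnIsError, -d:<what>/--debug:<what> and
# -p:<level>/--printLevel:<level>.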
import re
import os
import sys
import subprocess
import errno
import pywikibot
import doxymwglobal
from doxymwsite import DoxyMWSite
from doxymwpage import DoxygenHTMLPage
#Calls doxygen using a config file and outputs everything to a temporary path
def generateDoxygenHTMLDocs():
#Try the config file
with open(doxymwglobal.config["doxygen_configPath"]) as fp:
configLines = fp.readlines()
fp.seek(0)
config = fp.read()
#Parameters we must force to generate proper, small, output
params = {}
params["doxygen_paramsForce"] = {
#Output file format and location
#Critical
"OUTPUT_DIRECTORY" : "\"" + doxymwglobal.config["doxygen_tmpPath"] + "\"",
"GENERATE_HTML" : "YES",
"HTML_OUTPUT" : "html",
"HTML_FILE_EXTENSION" : ".html",
"HIDE_COMPOUND_REFERENCE": "YES", #Cleaner titles
#Disabling specific HTML sections
#Possibly critical, makes HTML easier to work with
"DISABLE_INDEX" : "YES",
"SEARCHENGINE" : "NO",
#Turn off other generation
#Not critical but wanted
#Extra HTML
"GENERATE_DOCSET" : "NO",
"GENERATE_HTMLHELP" : "NO",
"GENERATE_QHP" : "NO",
"GENERATE_ECLIPSEHELP" : "NO",
"GENERATE_TREEVIEW" : "NO",
#Other generations
"GENERATE_LATEX" : "NO",
"GENERATE_RTF" : "NO",
"GENERATE_XML" : "NO",
"GENERATE_DOCBOOK" : "NO",
"GENERATE_AUTOGEN_DEF" : "NO",
"GENERATE_PERLMOD" : "NO"
}
#Parameters we warn about but do not enforce
params["doxygen_paramsWarn"] = {
"CASE_SENSE_NAMES" : "NO" #MediaWiki doesn't support case sensitivity in title names
}
#Read each line for params to warn about
warnParams = params["doxygen_paramsWarn"]
for line in configLines:
#Comments
if line[0] == "#":
continue
        match = re.match(r'\s*(\S+)\s*=\s+(\S*)', line)
        if match:
            k, v = match.group(1, 2)
#Warn about specific parameters
for warn in warnParams.keys():
if k == warn and v != warnParams[warn]:
doxymwglobal.msg(doxymwglobal.warning, "Doxygen config has parameter " + warn + " not set to " + warnParams[warn] + " which may cause problems.")
#Append the force tags to the end (overwrite the other values)
forceParams = params["doxygen_paramsForce"]
for force in forceParams.keys():
config += "\n" + force + " = " + forceParams[force]
#Call doxygen, piping the config to it
with subprocess.Popen([doxymwglobal.config["doxygen_binaryPath"] + "/doxygen.exe", "-"], stdin=subprocess.PIPE, universal_newlines=True) as proc:
proc.communicate(input=config, timeout=20)
#Return after finished
#Reads the doxygen documents at the specified path and returns a list of wikiPages
def readDoxygenHTMLDocs():
#List of all the actual wiki pages
wikiPages = []
#Doxygen generates all it's files with prefixes by type
#This is not an exhaustive list, some configuration patterns have not been tested
#Files, prefix "_"
#Interfaces, prefix "interface_"
#Namespaces, prefix "namespace_"
#Classes, prefix "class_"
#Members lists, suffix "-members"
params = {}
params["doxygen_filePrefixes"] = {
"-members$" : "MEMBERS", #Match members lists first
"^_" : "FILE",
"^namespace_" : "NAMESPACE",
"^class_" : "CLASS",
"^interface_" : "INTERFACE"
}
#Other files we want (useful and don't provide redundancies to MediaWiki functionality)
#Class hierarchy, hierarchy.html
params["doxygen_otherFiles"] = [
"hierarchy"
]
for root, dirs, files in os.walk(doxymwglobal.config["doxygen_tmpPath"] + "/html"):
for file in files:
#Get all the file info
fileAbs = os.path.abspath(root + "\\" + file)
fileAbsPath, t = os.path.split(fileAbs)
fileRel = "./" + os.path.relpath(fileAbs, doxymwglobal.config["doxygen_tmpPath"])
fileRelPath, fileTail = os.path.split(fileRel)
fileName, fileExt = os.path.splitext(fileTail)
#Filter out by extension
if fileExt != ".html":
continue
#Check special files and type
fileDoxyType = None
#Special ("other") files
for other in params["doxygen_otherFiles"]:
if fileName == other:
fileDoxyType = "OTHER"
break
#Check type
if not fileDoxyType:
for regex, type in params["doxygen_filePrefixes"].items():
if re.search(regex, fileName):
fileDoxyType = type
break
#Filter out the html files without type
if fileDoxyType == None:
continue
#Make the doxygen wiki page object
page = DoxygenHTMLPage(fileAbsPath, fileTail, fileDoxyType)
wikiPages.append(page)
return wikiPages
def main():
#( 0 ) Get opts
from doxymwglobal import option #Default opts
#Argv[1] must be a command
if len(sys.argv) < 2:
doxymwglobal.msg(doxymwglobal.msgType.error, "Too few arguments given", usage=True)
option["command"] = sys.argv[1]
if option["command"] != "cleanup" and option["command"] != "update":
doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid command specified", usage=True)
#Argv[2:] must be other flags
for arg in sys.argv[2:]:
if arg == "-i" or arg == "--interactive":
option["interactive"] = True
elif arg == "-w" or arg == "--warnIsError":
option["warnIsError"] = True
elif arg == "-h" or arg == "--help":
printHelp()
return
elif arg.find("-d:") == 0 or arg.find("--debug:") == 0:
whichDebug = arg.split(":")[1]
if whichDebug != "doxygen" and whichDebug != "unsafeUpdate" and whichDebug != "whichDelete":
doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid debug specified " + whichDebug, usage=True)
else:
option["debug"].append(whichDebug)
elif arg.find("-p:") == 0 or arg.find("--printLevel:") == 0:
printLevel = arg.split(":")[1]
try:
#Try it as an int
printLevelInt = int(printLevel)
option["printLevel"] = doxymwglobal.msgType(printLevelInt)
except ValueError:
try:
#Try it as a string of the MsgType enum
option["printLevel"] = doxymwglobal.msgType[printLevel.lower()]
except KeyError:
doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid printLevel " + printLevel, usage=True)
else:
doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid option", usage=True)
#Do the actual operation
if option["command"] == "update":
#( 1 ) Generate the doxygen docs
generateDoxygenHTMLDocs()
#( 2 )Sort through all files and get the ones we want to parse
wikiPages = readDoxygenHTMLDocs()
#( 3 )Ready the page by getting everything into valid wiki markup
for page in wikiPages:
doxymwglobal.msg(doxymwglobal.msgType.info, "Converting " + page.filename)
page.convert(wikiPages)
#Debug the first portion, outputs everything to an html file
if "doxygen" in option["debug"]:
debugPath = doxymwglobal.debugPath()
for page in wikiPages:
doxymwglobal.msg(doxymwglobal.msgType.debug, "Debug output " + page.filename)
fp = open(debugPath + "/" + page.filename, 'w', errors="replace")
strr = page.mwtitle+"<br><br>"+page.mwcontents
fp.write(strr)
return
#( 4 )Perform all the wiki tasks
#Make sure we're logged in
site = pywikibot.Site()
#Make a site, run the command
site = DoxyMWSite(site)
if option["command"] == "cleanup":
site.cleanup()
if option["command"] == "update":
site.update(wikiPages)
#( 5 ) We're done!
doxymwglobal.msg(doxymwglobal.msgType.info, "Done")
if __name__ == '__main__':
main() | mit | -1,648,275,014,650,735,000 | 36.966942 | 169 | 0.560139 | false |
Ledoux/ShareYourSystem | Pythonlogy/draft/Representer/__init__.py | 1 | 25779 | # -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Representer is an important module for beginning to visualize
the structure of the instanced variables in the environment.
The idea is to use an indented representation, like the one produced by the
json.dump function, but with a more suitable (if somewhat dirty) access to the
AlineaStr of each line of the output, depending on the state
of the variables. Instances that are created from the decorated class have
a __repr__ method, indicating for each represented attribute where
it comes from: <Spe> (resp. <Base>) if it was defined at the level of the \_\_class\_\_
and <Instance> (resp. <Class>) if it is retrieved from the <InstanceVariable>.__dict__
(resp. <InstanceVariable>.__class__.__dict__)
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Classors.Inspecter"
DecorationModuleStr=BaseModuleStr
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
import copy
import inspect
import numpy
import sys
from ShareYourSystem.Standards.Objects import Initiator
#</ImportSpecificModules>
#<DefineLocals>
RepresentingDictIndentStr=" "
RepresentingListIndentStr=" "
RepresentingIndentStr=" /"
RepresentingEofStr="\n"
RepresentingIdBool=True
RepresentingCircularStr="{...}"
RepresentedAlineaStr=""
RepresentedAlreadyIdIntsList=[]
#</DefineLocals>
#<DefineFunctions>
def getRepresentedNumpyArray(_NumpyArray):
#Definition the ShapeList
ShapeList=list(numpy.shape(_NumpyArray))
#debug
'''
print('Representer l.25 : getRepresentedNumpyArray')
print('ShapeList is',ShapeList)
print('')
'''
	#Return the array directly if it is small, otherwise a short represented version of it
if (len(ShapeList)==1 and ShapeList[0]<3) or (len(ShapeList)>1 and ShapeList[1]<3):
return str(_NumpyArray)
return "<numpy.ndarray shape "+str(ShapeList)+">"
def getRepresentedPointerStrWithVariable(_Variable,**_KwargVariablesDict):
#debug
'''
print('Representer l.39 : getRepresentedPointerStrWithVariable')
print('')
'''
#set in the _KwargVariablesDict
if 'RepresentedDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['RepresentedDeepInt']=0
#Definition the Local alinea
RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""
#Define
if type(_Variable).__name__=='Database':
RepresentedVariableStr=_Variable._Database__name
else:
		RepresentedVariableStr=_Variable.__name__ if hasattr(_Variable,'__name__') else ""
#Debug
'''
print('l 85 Representer')
print('type(_Variable).__name__ is ')
print(type(_Variable).__name__)
print('RepresentedVariableStr is ')
print(RepresentedVariableStr)
print('')
'''
#Check
if RepresentingIdBool:
return RepresentedLocalAlineaStr+"<"+RepresentedVariableStr+" ("+_Variable.__class__.__name__+"), "+str(id(_Variable))+">"
else:
return RepresentedLocalAlineaStr+"<"+RepresentedVariableStr+" ("+_Variable.__class__.__name__+")"+" >"
def getRepresentedStrWithDictatedVariable(
_DictatedVariable,**_KwargVariablesDict
):
#set in the _KwargVariablesDict
if 'RepresentedDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['RepresentedDeepInt']=0
#debug
'''
print('Representer l.59 : getRepresentedStrWithDictatedVariable')
print('_KwargVariablesDict is ',str(_KwargVariablesDict))
print('')
'''
#Global
global RepresentedAlineaStr
#Definition the LocalRepresentedAlineaStr
LocalRepresentedAlineaStr=RepresentedAlineaStr+"".join(
[RepresentingIndentStr]*(_KwargVariablesDict['RepresentedDeepInt']))
#Init the RepresentedDictStr
RepresentedDictStr="\n"+LocalRepresentedAlineaStr+"{ "
#Scan the Items (integrativ loop)
if type(_DictatedVariable)!=dict and hasattr(_DictatedVariable,"items"):
#debug
'''
print('l 135 Representer')
print('_DictatedVariable is ')
print(_DictatedVariable)
print('')
'''
#items
RepresentedTuplesList=_DictatedVariable.items()
else:
#sort
RepresentedTuplesList=sorted(
_DictatedVariable.iteritems(), key=lambda key_value: key_value[0]
)
#Integrativ loop for seriaizing the items
for __RepresentedKeyStr,__RepresentedValueVariable in RepresentedTuplesList:
#debug
'''
print('Representer l.127')
print('__RepresentedKeyStr is',__RepresentedKeyStr)
print('')
'''
#set the begin of the line
RepresentedDictStr+="\n"+LocalRepresentedAlineaStr+RepresentingDictIndentStr
#Force the cast into Str
if type(__RepresentedKeyStr) not in [unicode,str]:
__RepresentedKeyStr=str(__RepresentedKeyStr)
#Get the WordStrsList
WordStrsList=SYS.getWordStrsListWithStr(__RepresentedKeyStr)
#Init the RepresentedValueVariableStr
RepresentedValueVariableStr="None"
#Split the case if it is a pointing variable or not
if len(WordStrsList)>0:
#Value is displayed
"""
if SYS.getWordStrsListWithStr(__RepresentedKeyStr)[-1]=="Pointer":
#Pointer Case
RepresentedValueVariableStr=getRepresentedPointerStrWithVariable(
__RepresentedValueVariable,
**_KwargVariablesDict
)
"""
"""
elif ''.join(SYS.getWordStrsListWithStr(__RepresentedKeyStr)[-2:])=="PointersList":
#debug
'''
print('__RepresentedValueVariable is ',__RepresentedValueVariable)
print('')
'''
#Pointer Case
RepresentedValueVariableStr=str(
map(
lambda ListedVariable:
getRepresentedPointerStrWithVariable(
ListedVariable,
**_KwargVariablesDict),
__RepresentedValueVariable
)
) if type(__RepresentedValueVariable)==list else "None"
"""
#Special Suffix Cases
if RepresentedValueVariableStr=="None":
#debug
'''
print('go to represent')
print('__RepresentedKeyStr is ',__RepresentedKeyStr)
print('id(__RepresentedValueVariable) is ',id(__RepresentedValueVariable))
print('')
'''
#Other Cases
RepresentedValueVariableStr=getRepresentedStrWithVariable(
__RepresentedValueVariable,
**_KwargVariablesDict
)
#Key and Value Case
RepresentedDictStr+="'"+__RepresentedKeyStr+"' : "+RepresentedValueVariableStr
#Add a last line
RepresentedDictStr+="\n"+LocalRepresentedAlineaStr+"}"
#debug
'''
print('RepresentedDictStr is ',RepresentedDictStr)
print('')
'''
#return the DictStr
return RepresentedDictStr
def getRepresentedStrWithListedVariable(_ListedVariable,**_KwargVariablesDict):
#Global
global RepresentedAlineaStr
#set in the _KwargVariablesDict
if 'RepresentedDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['RepresentedDeepInt']=0
#debug
'''
print('Representer l.166 : getRepresentedStrWithListedVariable')
print('_KwargVariablesDict is ',str(_KwargVariablesDict))
print('_ListedVariable is '+str(_ListedVariable))
print('')
'''
#Init the RepresentedDictStr
if type(_ListedVariable)==list:
BeginBracketStr='['
EndBracketStr=']'
else:
BeginBracketStr='('
EndBracketStr=')'
#Definition the LocalRepresentedAlineaStr
LocalRepresentedAlineaStr=RepresentedAlineaStr+"".join(
[RepresentingIndentStr]*(_KwargVariablesDict['RepresentedDeepInt']))
#Do the first Jump
RepresentedListStr="\n"+LocalRepresentedAlineaStr+BeginBracketStr
#Scan the Items (integrativ loop)
for ListedVariableInt,ListedVariable in enumerate(_ListedVariable):
#set the begin of the line
RepresentedListStr+="\n"+LocalRepresentedAlineaStr+RepresentingListIndentStr
#Get the represented version
RepresentedValueVariableStr=getRepresentedStrWithVariable(
ListedVariable,**dict(_KwargVariablesDict,**{'RepresentingAlineaIsBool':False})
)
#Key and Value Case
RepresentedListStr+=str(ListedVariableInt)+" : "+RepresentedValueVariableStr
#Add a last line
RepresentedListStr+="\n"+LocalRepresentedAlineaStr+EndBracketStr
#return the DictStr
return RepresentedListStr
def getRepresentedStrWithVariable(_Variable,**_KwargVariablesDict):
#Define global
global RepresentedAlreadyIdIntsList
#set in the _KwargVariablesDict
if 'RepresentedDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['RepresentedDeepInt']=0
#debug
'''
print('Representer l.213 : getRepresentedStrWithVariable')
#print('_KwargVariablesDict is ',str(_KwargVariablesDict))
#print('_Variable is '+str(_Variable))
print('type(_Variable) is '+str(type(_Variable)))
#print("hasattr(_Variable,'__repr__') is "+str(hasattr(_Variable,"__repr__")))
##if hasattr(_Variable,"__repr__"):
# print('hasattr(_Variable.__class__,"InspectedOrderedDict") is '+str(
# hasattr(_Variable.__class__,"InspectedOrderedDict")))
# if hasattr(_Variable.__class__,"InspectedOrderedDict"):
# print("_Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesSetKeyStr'] is "+str(
# _Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesSetKeyStr']))
# print(_Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesSetKeyStr'])
print('')
'''
#None type
if type(_Variable)==None.__class__:
return "None"
#Dict types print
#if type(_Variable) in [dict,collections.OrderedDict]:
if hasattr(_Variable,'items') and type(_Variable)!=type:
#Increment the deep
_KwargVariablesDict['RepresentedDeepInt']+=1
#debug
'''
print('This is a dictated type so get a represent like a dict')
print('')
'''
#id
RepresentedIdInt=id(_Variable)
#debug
'''
print('RepresentedIdInt is ',RepresentedIdInt)
print('RepresentedAlreadyIdIntsList is ',RepresentedAlreadyIdIntsList)
print('')
'''
#Check if it was already represented
if RepresentedIdInt not in RepresentedAlreadyIdIntsList:
#Debug
'''
print('RepresentedAlreadyIdIntsList is ',RepresentedAlreadyIdIntsList)
print('')
'''
#append
RepresentedAlreadyIdIntsList.append(RepresentedIdInt)
#Return the repr of the _Variable but shifted with the RepresentedAlineaStr
RepresentedStr=getRepresentedStrWithDictatedVariable(
_Variable,
**_KwargVariablesDict
)
else:
#Return the circular Str
RepresentedStr=RepresentingCircularStr+getRepresentedPointerStrWithVariable(_Variable)
#Debug
'''
print('RepresentedIdInt is ',RepresentedIdInt)
print('RepresentedStr is ',RepresentedStr)
print('')
'''
#return
return RepresentedStr
#List types print
elif type(_Variable) in [list,tuple]:
#id
RepresentedIdInt=id(_Variable)
#Check if it was already represented
if RepresentedIdInt not in RepresentedAlreadyIdIntsList:
#debug
'''
print('This is a listed type so get a represent like a list')
print('')
'''
#append
RepresentedAlreadyIdIntsList.append(RepresentedIdInt)
#Check if it is a List of Objects or Python Types
if all(
map(
lambda ListedVariable:
type(ListedVariable) in [float,int,str,unicode,numpy.float64] or ListedVariable==None,
_Variable
)
)==False:
#Increment the deep
_KwargVariablesDict['RepresentedDeepInt']+=1
#debug
'''
print('Print a represented version of the list')
print('')
'''
#Return
RepresentedStr=getRepresentedStrWithListedVariable(_Variable,**_KwargVariablesDict)
else:
#debug
'''
print('Here just print the list directly')
print('')
'''
#Definition the Local alinea
RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""
#Return
RepresentedStr=RepresentedLocalAlineaStr+repr(
_Variable).replace("\n","\n"+RepresentedLocalAlineaStr)
#return
return RepresentedStr
else:
#Return the circular Str
return RepresentingCircularStr+getRepresentedPointerStrWithVariable(_Variable)
#Instance print
elif type(_Variable).__name__ in ["instancemethod"]:
#Debug
'''
print('Representer l 421')
print('This is a method ')
print('_Variable.__name__ is ',_Variable.__name__)
print('')
'''
#Definition the Local alinea
RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""
#append
RepresentedAlreadyIdIntsList.append(_Variable.im_self)
#return RepresentedAlineaStr+"instancemethod"
RepresentedStr=RepresentedLocalAlineaStr
RepresentedStr+="< bound method "+_Variable.__name__
RepresentedStr+=" of "+str(_Variable.im_self.__class__)
RepresentedStr+=" "+str(id(_Variable.im_self))+" >"
#RepresentedStr='inst'
#return
return RepresentedStr
#Str types
elif type(_Variable) in SYS.StrTypesList:
#debug
'''
print('This is a Str type so get a represent like a Str')
print('')
'''
#Definition the Local alinea
RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""
#Return
return RepresentedLocalAlineaStr+_Variable.replace("\n","\n"+RepresentedLocalAlineaStr)
#Other
elif hasattr(_Variable,"__repr__") and hasattr(
_Variable.__class__,"InspectedArgumentDict"
) and '__repr__' in _Variable.__class__.InspectedArgumentDict and _Variable.__class__.InspectedArgumentDict[
'__repr__']['KwargVariablesSetKeyStr']!="":
#debug
'''
print('This is a representer so call the repr of it with the _KwargVariablesDict')
print('type(_Variable) is ',type(_Variable))
print('id(_Variable) is ',id(_Variable))
print('')
'''
#id
RepresentedIdInt=id(_Variable)
#Check if it was already represented
if RepresentedIdInt not in RepresentedAlreadyIdIntsList:
#append
RepresentedAlreadyIdIntsList.append(RepresentedIdInt)
#Return the repr of the _Variable but shifted with the RepresentedAlineaStr
RepresentedStr=_Variable.__repr__(**_KwargVariablesDict)
#return
return RepresentedStr
else:
#Return the circular Str
return RepresentingCircularStr+getRepresentedPointerStrWithVariable(_Variable)
else:
#Debug
'''
print('This is not identified so call the repr of it')
print('')
'''
#Definition the Local alinea
RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict[
'RepresentedDeepInt']==0 else ""
#Define
RepresentedIdInt=id(_Variable)
#Debug
'''
print('RepresentedIdInt is ',RepresentedIdInt)
print('RepresentedAlreadyIdIntsList is ',RepresentedAlreadyIdIntsList)
print('')
'''
#Check if it was already represented
if RepresentedIdInt not in RepresentedAlreadyIdIntsList:
#debug
'''
print('type(_Variable) is ',type(_Variable))
print('')
'''
#Append but only for mutables variable
if type(_Variable) not in [bool,str,int,float]:
RepresentedAlreadyIdIntsList.append(RepresentedIdInt)
else:
#debug
'''
print('_Variable is ',_Variable)
print('')
'''
pass
#Return a repr of the _Variable but shifted with the RepresentedAlineaStr
RepresentedStr=RepresentedLocalAlineaStr+repr(_Variable).replace(
"\n",
"\n"+RepresentedLocalAlineaStr
)
#return
return RepresentedStr
else:
#Return the circular Str
return RepresentedLocalAlineaStr+RepresentingCircularStr+getRepresentedPointerStrWithVariable(
_Variable)
def _print(_Variable,**_KwargVariablesDict):
print(represent(_Variable,**_KwargVariablesDict))
def represent(_Variable,**_KwargVariablesDict):
#Definition the global
global RepresentedAlineaStr,RepresentedAlreadyIdIntsList
#Debug
'''
print('Representer l.545')
print('Reinit the RepresentedAlreadyIdIntsList')
print('')
'''
#Reinit
RepresentedAlreadyIdIntsList=[]
#Debug
'''
print('Representer l.554')
print('_KwargVariablesDict is ',_KwargVariablesDict)
print('')
'''
#Represent without shifting the Strs or not
if 'RepresentingAlineaIsBool' not in _KwargVariablesDict or _KwargVariablesDict['RepresentingAlineaIsBool']:
return getRepresentedStrWithVariable(_Variable,**_KwargVariablesDict)
else:
RepresentedOldAlineaStr=RepresentedAlineaStr
RepresentedAlineaStr=""
RepresentedStr=getRepresentedStrWithVariable(_Variable,**_KwargVariablesDict)
RepresentedAlineaStr=RepresentedOldAlineaStr
return RepresentedStr
#</DefineFunctions>
#Link
def __main__represent(_RepresentingStr,**_KwargVariablesDict):
return represent(
_RepresentingStr,
**dict(_KwargVariablesDict,**{'RepresentingAlineaIsBool':False})
)
def __main__print(_RepresentingStr,**_KwargVariablesDict):
return _print(
_RepresentingStr,
**dict(_KwargVariablesDict,**{'RepresentingAlineaIsBool':False})
)
SYS._str = __main__represent
SYS._print = __main__print
#<DefineClass>
@DecorationClass()
class RepresenterClass(BaseClass):
def default_init(self,**_KwargVariablesDict):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def __call__(self,_Class):
#debug
'''
print('Representer l.478 : _Class is ',_Class)
print('')
'''
#Call the parent init method
BaseClass.__call__(self,_Class)
#debug
'''
print('Representer l.485 : self.DoClass is ',self.DoClass)
print('')
'''
#Represent
self.represent()
#Return
return _Class
def do_represent(self):
#alias
RepresentedClass=self.DoClass
#debug
'''
print('Representer l.352 : RepresentedClass is ',RepresentedClass)
print('')
'''
#Check
'''
if hasattr(RepresentedClass,'RepresentingKeyStrsList')==False or (
len(RepresentedClass.__bases__)>0 and hasattr(RepresentedClass.__bases__[0
],'RepresentingKeyStrsList') and RepresentedClass.__bases__[0
].RepresentingKeyStrsList==RepresentedClass.RepresentingKeyStrsList):
#init
RepresentedClass.RepresentingKeyStrsList=[]
'''
RepresentedClass.RepresentingKeyStrsList=RepresentedClass.DefaultSetKeyStrsList
#init
#RepresentedClass.RepresentingSkipKeyStrsList=None
#init
#RepresentedClass.RepresentingForceKeyStrsList=None
#set the BaseKeyStrsList
KeyStrsSet=set(
SYS.collect(
RepresentedClass,
'__bases__',
'RepresentingKeyStrsList'
)
)
#KeyStrsSet.difference_update(set(RepresentedClass.RepresentingKeyStrsList))
RepresentedClass.RepresentedBaseKeyStrsList=list(KeyStrsSet)
#Split between the one from the class or not
[
RepresentedClass.RepresentedSpecificKeyStrsList,
RepresentedClass.RepresentedNotSpecificKeyStrsList
]=SYS.groupby(
lambda __KeyStr:
__KeyStr not in RepresentedClass.RepresentedBaseKeyStrsList,
RepresentedClass.RepresentingKeyStrsList
)
#debug
'''
print(
RepresentedClass.__name__,
#Class.__mro__,
#Class.RepresentedNotGettingStrsList,
list(RepresentedClass.RepresentedBasedKeyStrsList)
)
'''
#Add to the KeyStrsList
RepresentedClass.KeyStrsList+=[
'RepresentingKeyStrsList',
'RepresentingSkipKeyStrsList',
'RepresentingForceKeyStrsList',
'RepresentedBaseKeyStrsList',
'RepresentedSpecificKeyStrsList',
'RepresentedNotSpecificKeyStrsList',
]
"""
#Definition the representing methods
def represent(_InstanceVariable,**_KwargVariablesDict):
#debug
'''
_InstanceVariable.debug(('RepresentedClass',RepresentedClass,[
'RepresentingKeyStrsList',
'RepresentedBaseKeyStrsList',
'RepresentedSpecificKeyStrsList',
'RepresentedNotSpecificKeyStrsList'
]))
'''
#Represent the Specific KeyStrs
RepresentedTuplesList=map(
lambda __RepresentingSpecificKeyStr:
(
"<Spe>"+("<Instance>"
if __RepresentingSpecificKeyStr in _InstanceVariable.__dict__
else "<Class>"
)+__RepresentingSpecificKeyStr
,
getattr(_InstanceVariable,__RepresentingSpecificKeyStr)
),
RepresentedClass.RepresentedSpecificKeyStrsList
)
#Represent the BaseKeyStrs
if 'RepresentingBaseKeyStrsListBool' in _KwargVariablesDict and _KwargVariablesDict['RepresentingBaseKeyStrsListBool']:
RepresentedTuplesList+=map(
lambda __NotSpecificKeyStrsList:
(
"<Base>"+("<Instance>"
if __NotSpecificKeyStrsList in _InstanceVariable.__dict__
else "<Class>"
)+__NotSpecificKeyStrsList
,
getattr(_InstanceVariable,__NotSpecificKeyStrsList)
),
RepresentedClass.RepresentedNotSpecificKeyStrsList
)
RepresentedTuplesList+=map(
lambda __RepresentedBaseKeyStr:
(
"<Base>"+("<Instance>"
if __RepresentedBaseKeyStr in _InstanceVariable.__dict__
else "<Class>"
)+__RepresentedBaseKeyStr
,
getattr(_InstanceVariable,__RepresentedBaseKeyStr)
),
RepresentedClass.RepresentedBaseKeyStrsList
)
#Represent the NewInstanceKeyStrs in the __dict__
if 'RepresentingNewInstanceKeyStrsListBool' not in _KwargVariablesDict or _KwargVariablesDict[
'RepresentingNewInstanceKeyStrsListBool']:
#filter
RepresentedNewInstanceTuplesList=SYS._filter(
lambda __NewItemTuple:
__NewItemTuple[0] not in RepresentedClass.DefaultSetKeyStrsList+RepresentedClass.DefaultBaseSetKeyStrsList,
_InstanceVariable.__dict__.items()
)
#Debug
'''
print('RepresentedNewInstanceTuplesList is ')
print(RepresentedNewInstanceTuplesList)
print('RepresentedClass.RepresentingSkipKeyStrsList is ')
print(RepresentedClass.RepresentingSkipKeyStrsList)
print('')
'''
#Check
if _InstanceVariable.RepresentingSkipKeyStrsList==None:
_InstanceVariable.RepresentingSkipKeyStrsList=[]
#filter
RepresentedNewInstanceTuplesList=SYS._filter(
lambda __RepresentedNewInstanceTuple:
__RepresentedNewInstanceTuple[0] not in _InstanceVariable.RepresentingSkipKeyStrsList,
RepresentedNewInstanceTuplesList
)
#Debug
'''
print('RepresentedNewInstanceTuplesList is ')
print(RepresentedNewInstanceTuplesList)
print('')
'''
#map
RepresentedTuplesList+=map(
lambda __NewItemTuple:
(
"<New><Instance>"+__NewItemTuple[0],
__NewItemTuple[1]
),
RepresentedNewInstanceTuplesList
)
#Represent the NewClassKeyStrs in the _RepresentedClass__.__dict__
if 'RepresentingNewClassKeyStrsListBool' not in _KwargVariablesDict or _KwargVariablesDict[
'RepresentingNewClassKeyStrsListBool']:
RepresentedTuplesList+=map(
lambda __NewKeyStr:
(
"<New><Class>"+__NewKeyStr,
_InstanceVariable.__class__.__dict__[__NewKeyStr]
),
SYS._filter(
lambda __KeyStr:
__KeyStr not in RepresentedClass.KeyStrsList and __KeyStr not in _InstanceVariable.__dict__,
SYS.getKeyStrsListWithClass(
_InstanceVariable.__class__
)
)
)
if 'RepresentingNotConcludeTuplesList' in _KwargVariablesDict:
#Debug
'''
print('l 792 Representer')
print('RepresentedTuplesList is ')
print(RepresentedTuplesList)
print('')
'''
#filter
RepresentedTuplesList=SYS._filter(
lambda __RepresentedTuple:
any(
map(
lambda __RepresentingNotConcludeTuple:
__RepresentingNotConcludeTuple[0](
__RepresentedTuple,
__RepresentingNotConcludeTuple[1]
),
_KwargVariablesDict['RepresentingNotConcludeTuplesList']
)
)==False,
RepresentedTuplesList
)
#Debug
'''
print('l 815 Representer')
print('RepresentedTuplesList is ')
print(RepresentedTuplesList)
print('')
'''
if 'RepresentingKeyStrsList' in _KwargVariablesDict:
RepresentedTuplesList+=map(
lambda __RepresentingKeyStr:
(
"<Spe><Instance>"+__RepresentingKeyStr,
_InstanceVariable.__dict__[__RepresentingKeyStr]
)
if __RepresentingKeyStr in _InstanceVariable.__dict__ and __RepresentingKeyStr not in RepresentedClass.DefaultSetKeyStrsList
else(
(
"<Base><Instance>"+__RepresentingKeyStr,
_InstanceVariable.__dict__[__RepresentingKeyStr]
)
if __RepresentingKeyStr in _InstanceVariable.__dict__ and __RepresentingKeyStr in RepresentedClass.DefaultBaseSetKeyStrsList
else
(
(
"<Base><Class>"+__RepresentingKeyStr,
getattr(_InstanceVariable,__RepresentingKeyStr)
)
if __RepresentingKeyStr not in _InstanceVariable.__dict__
else
(
"<New><Instance>"+__RepresentingKeyStr,
_InstanceVariable.__dict__[__RepresentingKeyStr]
)
)
),
_KwargVariablesDict['RepresentingKeyStrsList'
]+_InstanceVariable.RepresentingForceKeyStrsList
)
#Append
global RepresentedAlreadyIdIntsList
#debug
'''
print('Represener l.629')
print('id(_InstanceVariable) is ',id(_InstanceVariable))
print('_InstanceVariable not in RepresentedAlreadyIdIntsList is ',str(
_InstanceVariable not in RepresentedAlreadyIdIntsList))
print('')
'''
#define the RepresentedStr
return getRepresentedPointerStrWithVariable(
_InstanceVariable
)+getRepresentedStrWithVariable(
dict(RepresentedTuplesList),
**_KwargVariablesDict
)
"""
#Bound and set in the InspectedOrderedDict
#RepresentedClass.__repr__=represent
#RepresentedClass.InspectedArgumentDict['__repr__']=SYS.ArgumentDict(
# RepresentedClass.__repr__)
#</DefineClass>
#set in the InitiatorClass
Initiator.InitiatorClass.RepresentedNotGettingStrsList=['InitiatingUpdateBool']
Initiator.InitiatorClass.RepresentedSpecificKeyStrsList=['InitiatingUpdateBool']
| mit | 3,242,304,788,324,602,400 | 25.881126 | 132 | 0.704255 | false |
peheje/baselines | baselines/deepq/experiments/atari/enjoy.py | 1 | 2514 | import argparse
import gym
import os
import numpy as np
from gym.monitoring import VideoRecorder
import baselines.common.tf_util as U
from baselines import deepq
from baselines.common.misc_util import (
boolean_flag,
SimpleMonitor,
)
from baselines.common.atari_wrappers_deprecated import wrap_dqn
from baselines.deepq.experiments.atari.model import model, dueling_model
def parse_args():
parser = argparse.ArgumentParser("Run an already learned DQN model.")
# Environment
parser.add_argument("--env", type=str, required=True, help="name of the game")
parser.add_argument("--model-dir", type=str, default=None, help="load model from this directory. ")
parser.add_argument("--video", type=str, default=None, help="Path to mp4 file where the video of first episode will be recorded.")
boolean_flag(parser, "stochastic", default=True, help="whether or not to use stochastic actions according to models eps value")
boolean_flag(parser, "dueling", default=False, help="whether or not to use dueling model")
return parser.parse_args()
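# Example invocation (the model directory is only a placeholder; it should point
# at a directory containing a "saved" checkpoint produced by the training script):
#
#   python -m baselines.deepq.experiments.atari.enjoy --env Pong \
#       --model-dir /path/to/trained/model --dueling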
def make_env(game_name):
env = gym.make(game_name + "NoFrameskip-v4")
env = SimpleMonitor(env)
env = wrap_dqn(env)
return env
def play(env, act, stochastic, video_path):
num_episodes = 0
video_recorder = None
video_recorder = VideoRecorder(
env, video_path, enabled=video_path is not None)
obs = env.reset()
while True:
env.unwrapped.render()
video_recorder.capture_frame()
action = act(np.array(obs)[None], stochastic=stochastic)[0]
obs, rew, done, info = env.step(action)
if done:
obs = env.reset()
if len(info["rewards"]) > num_episodes:
if len(info["rewards"]) == 1 and video_recorder.enabled:
# save video of first episode
print("Saved video.")
video_recorder.close()
video_recorder.enabled = False
print(info["rewards"][-1])
num_episodes = len(info["rewards"])
if __name__ == '__main__':
with U.make_session(4) as sess:
args = parse_args()
env = make_env(args.env)
act = deepq.build_act(
make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
q_func=dueling_model if args.dueling else model,
num_actions=env.action_space.n)
U.load_state(os.path.join(args.model_dir, "saved"))
play(env, act, args.stochastic, args.video)
| mit | -9,141,925,088,257,811,000 | 35.434783 | 134 | 0.650756 | false |
pbs/django-filer | filer/management/commands/take_out_filer_trash.py | 1 | 1910 | from django.core.management.base import BaseCommand
from filer.models import File, Folder
from filer import settings as filer_settings
from django.utils import timezone
from datetime import timedelta
class Command(BaseCommand):
help = "Hard-deletes old files and folders from filer trash."
def handle(self, *args, **options):
no_of_sec = filer_settings.FILER_TRASH_CLEAN_INTERVAL
time_threshold = timezone.now() - timedelta(seconds=no_of_sec)
files_ids = File.trash.filter(deleted_at__lt=time_threshold)\
.values_list('id', flat=True)
folder_ids = Folder.trash.filter(deleted_at__lt=time_threshold)\
.order_by('tree_id', '-level').values_list('id', flat=True)
if not folder_ids and not files_ids:
self.stdout.write("No old files or folders.\n")
return
for file_id in files_ids:
a_file = File.trash.get(id=file_id)
self.stdout.write("Deleting file %s: %s\n" % (
file_id, repr(a_file.file.name)))
try:
a_file.delete(to_trash=False)
except Exception as e:
self.stderr.write("%s\n" % str(e))
for folder_id in folder_ids:
a_folder = Folder.trash.get(id=folder_id)
ancestors = a_folder.get_ancestors(include_self=True)
path = repr('/'.join(ancestors.values_list('name', flat=True)))
if File.all_objects.filter(folder=folder_id).exists():
self.stdout.write("Cannot delete folder %s: %s since is "
"not empty.\n" % (folder_id, path))
continue
self.stdout.write(
"Deleting folder %s: %s\n" % (folder_id, path))
try:
a_folder.delete(to_trash=False)
except Exception as e:
self.stderr.write("%s\n" % str(e))
| bsd-3-clause | -1,781,223,535,377,464,600 | 40.521739 | 75 | 0.577487 | false |
vmagnin/pyxmltv | telecharger_xmltv.py | 1 | 2764 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Vincent MAGNIN, 2016-2019
'''
Function to download an XMLTV file
'''
import zipfile
import os
import re
import pickle
from urllib.request import urlretrieve, urlopen, URLError
def telecharger_xmltv(url, nom_fichier):
"""
    Download the file located at url if a new version is available
"""
    # Retrieve the HTTP ETag of any copy of the file that may already
    # be present in the script's directory:
try:
with open("ETag_xmltv.pickle", 'rb') as FICHIER_ETag:
ANCIEN_ETag = pickle.load(FICHIER_ETag)
except OSError:
ANCIEN_ETag = "0"
    # Retrieve the HTTP ETag of the zip file on the server:
try:
entete = urlopen(url+nom_fichier).info()
match = re.search(r'ETag: "(\w+-\w+-\w+)"', str(entete))
ETag = match.group(1)
except URLError:
ETag = "00"
print("URL erronée")
    except AttributeError: # If match is empty (no ETag available)
ETag = "00"
        # Try to use the Last-Modified field instead
try:
entete = urlopen(url+nom_fichier).info()
match = re.search(r'Last-Modified: (.*)', str(entete))
ETag = match.group(1)
except AttributeError:
ANCIEN_ETag = "0" # On force le téléchargement du zip
    # Re-download the zip if it has been modified on the server:
if ETag != ANCIEN_ETag:
print("Chargement de la dernière version en ligne...")
try:
urlretrieve(url+nom_fichier, nom_fichier)
with zipfile.ZipFile(nom_fichier, 'r') as zfile:
zfile.extractall()
            # Save the ETag of the zip file:
with open("ETag_xmltv.pickle", 'wb') as FICHIER_ETag:
pickle.dump(ETag, FICHIER_ETag)
except URLError:
print("Attention ! Téléchargement nouveau fichier impossible...")
if not os.access(nom_fichier, os.F_OK):
print("Erreur : pas de fichier dans le répertoire courant !")
exit(2)
| gpl-3.0 | -181,304,397,692,986,200 | 35.972973 | 77 | 0.638523 | false |
mmgen/mmgen | test/test_py_d/ts_ref.py | 1 | 13630 | #!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, command-line Bitcoin cold storage solution
# Copyright (C)2013-2021 The MMGen Project <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
ts_ref.py: Reference file tests for the test.py test suite
"""
import os
from mmgen.globalvars import g
from mmgen.opts import opt
from mmgen.wallet import MMGenMnemonic
from ..include.common import *
from .common import *
from .ts_base import *
from .ts_shared import *
wpasswd = 'reference password'
class TestSuiteRef(TestSuiteBase,TestSuiteShared):
'saved reference address, password and transaction files'
tmpdir_nums = [8]
networks = ('btc','btc_tn','ltc','ltc_tn')
passthru_opts = ('daemon_data_dir','rpc_port','coin','testnet')
sources = {
'ref_addrfile': '98831F3A{}[1,31-33,500-501,1010-1011]{}.addrs',
'ref_segwitaddrfile':'98831F3A{}-S[1,31-33,500-501,1010-1011]{}.addrs',
'ref_bech32addrfile':'98831F3A{}-B[1,31-33,500-501,1010-1011]{}.addrs',
'ref_keyaddrfile': '98831F3A{}[1,31-33,500-501,1010-1011]{}.akeys.mmenc',
'ref_passwdfile_b32_24': '98831F3A-фубар@crypto.org-b32-24[1,4,1100].pws',
'ref_passwdfile_b32_12': '98831F3A-фубар@crypto.org-b32-12[1,4,1100].pws',
'ref_passwdfile_b58_10': '98831F3A-фубар@crypto.org-b58-10[1,4,1100].pws',
'ref_passwdfile_b58_20': '98831F3A-фубар@crypto.org-b58-20[1,4,1100].pws',
'ref_passwdfile_hex_32': '98831F3A-фубар@crypto.org-hex-32[1,4,1100].pws',
'ref_passwdfile_hex_48': '98831F3A-фубар@crypto.org-hex-48[1,4,1100].pws',
'ref_passwdfile_hex_64': '98831F3A-фубар@crypto.org-hex-64[1,4,1100].pws',
'ref_passwdfile_bip39_12': '98831F3A-фубар@crypto.org-bip39-12[1,4,1100].pws',
'ref_passwdfile_bip39_18': '98831F3A-фубар@crypto.org-bip39-18[1,4,1100].pws',
'ref_passwdfile_bip39_24': '98831F3A-фубар@crypto.org-bip39-24[1,4,1100].pws',
'ref_passwdfile_xmrseed_25': '98831F3A-фубар@crypto.org-xmrseed-25[1,4,1100].pws',
'ref_passwdfile_hex2bip39_12': '98831F3A-фубар@crypto.org-hex2bip39-12[1,4,1100].pws',
'ref_tx_file': { # data shared with ref_altcoin, autosign
'btc': ('0B8D5A[15.31789,14,tl=1320969600].rawtx',
'0C7115[15.86255,14,tl=1320969600].testnet.rawtx'),
'ltc': ('AF3CDF-LTC[620.76194,1453,tl=1320969600].rawtx',
'A5A1E0-LTC[1454.64322,1453,tl=1320969600].testnet.rawtx'),
'bch': ('460D4D-BCH[10.19764,tl=1320969600].rawtx',
'359FD5-BCH[6.68868,tl=1320969600].testnet.rawtx'),
'eth': ('88FEFD-ETH[23.45495,40000].rawtx',
'B472BD-ETH[23.45495,40000].testnet.rawtx'),
'mm1': ('5881D2-MM1[1.23456,50000].rawtx',
'6BDB25-MM1[1.23456,50000].testnet.rawtx'),
'etc': ('ED3848-ETC[1.2345,40000].rawtx','')
},
}
chk_data = {
'ref_subwallet_sid': {
'98831F3A:32L':'D66B4885',
'98831F3A:1S':'20D95B09',
},
'ref_addrfile_chksum': {
'btc': ('6FEF 6FB9 7B13 5D91','424E 4326 CFFE 5F51'),
'ltc': ('AD52 C3FE 8924 AAF0','4EBE 2E85 E969 1B30'),
},
'ref_segwitaddrfile_chksum': {
'btc': ('06C1 9C87 F25C 4EE6','072C 8B07 2730 CB7A'),
'ltc': ('63DF E42A 0827 21C3','5DD1 D186 DBE1 59F2'),
},
'ref_bech32addrfile_chksum': {
'btc': ('9D2A D4B6 5117 F02E','0527 9C39 6C1B E39A'),
'ltc': ('FF1C 7939 5967 AB82','ED3D 8AA4 BED4 0B40'),
},
'ref_keyaddrfile_chksum': {
'btc': ('9F2D D781 1812 8BAD','88CC 5120 9A91 22C2'),
'ltc': ('B804 978A 8796 3ED4','98B5 AC35 F334 0398'),
},
'ref_passwdfile_b32_12_chksum': '7252 CD8D EF0D 3DB1',
'ref_passwdfile_b32_24_chksum': '8D56 3845 A072 A5B9',
'ref_passwdfile_b58_10_chksum': '534F CC1A 6701 9FED',
'ref_passwdfile_b58_20_chksum': 'DDD9 44B0 CA28 183F',
'ref_passwdfile_hex_32_chksum': '05C7 3678 E25E BC32',
'ref_passwdfile_hex_48_chksum': '7DBB FFD0 633E DE6F',
'ref_passwdfile_hex_64_chksum': 'F11D CB0A 8AE3 4D21',
'ref_passwdfile_bip39_12_chksum': 'BF57 02A3 5229 CF18',
'ref_passwdfile_bip39_18_chksum': '31D3 1656 B7DC 27CF',
'ref_passwdfile_bip39_24_chksum': 'E565 3A59 7D91 4671',
'ref_passwdfile_xmrseed_25_chksum': 'B488 21D3 4539 968D',
'ref_passwdfile_hex2bip39_12_chksum': '93AD 4AE2 03D1 8A0A',
}
cmd_group = ( # TODO: move to tooltest2
('ref_words_to_subwallet_chk1','subwallet generation from reference words file (long subseed)'),
('ref_words_to_subwallet_chk2','subwallet generation from reference words file (short subseed)'),
('ref_subwallet_addrgen1','subwallet address file generation (long subseed)'),
('ref_subwallet_addrgen2','subwallet address file generation (short subseed)'),
('ref_subwallet_keygen1','subwallet key-address file generation (long subseed)'),
('ref_subwallet_keygen2','subwallet key-address file generation (short subseed)'),
('ref_addrfile_chk', 'saved reference address file'),
('ref_segwitaddrfile_chk','saved reference address file (segwit)'),
('ref_bech32addrfile_chk','saved reference address file (bech32)'),
('ref_keyaddrfile_chk','saved reference key-address file'),
('ref_passwdfile_chk_b58_20','saved reference password file (base58, 20 chars)'),
('ref_passwdfile_chk_b58_10','saved reference password file (base58, 10 chars)'),
('ref_passwdfile_chk_b32_24','saved reference password file (base32, 24 chars)'),
('ref_passwdfile_chk_b32_12','saved reference password file (base32, 12 chars)'),
('ref_passwdfile_chk_hex_32','saved reference password file (hexadecimal, 32 chars)'),
('ref_passwdfile_chk_hex_48','saved reference password file (hexadecimal, 48 chars)'),
('ref_passwdfile_chk_hex_64','saved reference password file (hexadecimal, 64 chars)'),
('ref_passwdfile_chk_bip39_12','saved reference password file (BIP39, 12 words)'),
('ref_passwdfile_chk_bip39_18','saved reference password file (BIP39, 18 words)'),
('ref_passwdfile_chk_bip39_24','saved reference password file (BIP39, 24 words)'),
('ref_passwdfile_chk_xmrseed_25','saved reference password file (Monero new-style mnemonic, 25 words)'),
('ref_passwdfile_chk_hex2bip39_12','saved reference password file (hex-to-BIP39, 12 words)'),
# Create the fake inputs:
# ('txcreate8', 'transaction creation (8)'),
('ref_tx_chk', 'signing saved reference tx file'),
('ref_brain_chk_spc3', 'saved brainwallet (non-standard spacing)'),
('ref_dieroll_chk_seedtruncate','saved dieroll wallet with extra entropy bits'),
('ref_tool_decrypt', 'decryption of saved MMGen-encrypted file'),
)
@property
def nw_desc(self):
return '{} {}'.format(self.proto.coin,('Mainnet','Testnet')[self.proto.testnet])
def _get_ref_subdir_by_coin(self,coin):
return {'btc': '',
'bch': '',
'ltc': 'litecoin',
'eth': 'ethereum',
'etc': 'ethereum_classic',
'xmr': 'monero',
'zec': 'zcash',
'dash': 'dash' }[coin.lower()]
@property
def ref_subdir(self):
return self._get_ref_subdir_by_coin(self.proto.coin)
def ref_words_to_subwallet_chk1(self):
return self.ref_words_to_subwallet_chk('32L')
def ref_words_to_subwallet_chk2(self):
return self.ref_words_to_subwallet_chk('1S')
def ref_words_to_subwallet_chk(self,ss_idx):
wf = dfl_words_file
ocls = MMGenMnemonic
args = ['-d',self.tr.trash_dir,'-o',ocls.fmt_codes[-1],wf,ss_idx]
t = self.spawn('mmgen-subwalletgen',args,extra_desc='(generate subwallet)')
t.expect('Generating subseed {}'.format(ss_idx))
chk_sid = self.chk_data['ref_subwallet_sid']['98831F3A:{}'.format(ss_idx)]
fn = t.written_to_file(capfirst(ocls.desc))
assert chk_sid in fn,'incorrect filename: {} (does not contain {})'.format(fn,chk_sid)
ok()
t = self.spawn('mmgen-walletchk',[fn],extra_desc='(check subwallet)')
t.expect(r'Valid MMGen native mnemonic data for Seed ID ([0-9A-F]*)\b',regex=True)
sid = t.p.match.group(1)
assert sid == chk_sid,'subseed ID {} does not match expected value {}'.format(sid,chk_sid)
t.read()
return t
def ref_subwallet_addrgen(self,ss_idx,target='addr'):
wf = dfl_words_file
args = ['-d',self.tr.trash_dir,'--subwallet='+ss_idx,wf,'1-10']
t = self.spawn('mmgen-{}gen'.format(target),args)
t.expect('Generating subseed {}'.format(ss_idx))
chk_sid = self.chk_data['ref_subwallet_sid']['98831F3A:{}'.format(ss_idx)]
assert chk_sid == t.expect_getend('Checksum for .* data ',regex=True)[:8]
if target == 'key':
t.expect('Encrypt key list? (y/N): ','n')
fn = t.written_to_file(('Addresses','Secret keys')[target=='key'])
assert chk_sid in fn,'incorrect filename: {} (does not contain {})'.format(fn,chk_sid)
return t
def ref_subwallet_addrgen1(self):
return self.ref_subwallet_addrgen('32L')
def ref_subwallet_addrgen2(self):
return self.ref_subwallet_addrgen('1S')
def ref_subwallet_keygen1(self):
return self.ref_subwallet_addrgen('32L',target='key')
def ref_subwallet_keygen2(self):
return self.ref_subwallet_addrgen('1S',target='key')
def ref_addrfile_chk(
self,
ftype = 'addr',
coin = None,
subdir = None,
pfx = None,
mmtype = None,
add_args = [],
id_key = None,
pat = None ):
pat = pat or f'{self.nw_desc}.*Legacy'
af_key = 'ref_{}file'.format(ftype) + ('_' + id_key if id_key else '')
af_fn = TestSuiteRef.sources[af_key].format(pfx or self.altcoin_pfx,'' if coin else self.tn_ext)
af = joinpath(ref_dir,(subdir or self.ref_subdir,'')[ftype=='passwd'],af_fn)
coin_arg = [] if coin == None else ['--coin='+coin]
tool_cmd = ftype.replace('segwit','').replace('bech32','')+'file_chksum'
t = self.spawn('mmgen-tool',coin_arg+['--verbose','-p1',tool_cmd,af]+add_args)
if ftype == 'keyaddr':
t.do_decrypt_ka_data(hp=ref_kafile_hash_preset,pw=ref_kafile_pass,have_yes_opt=True)
chksum_key = '_'.join([af_key,'chksum'] + ([coin.lower()] if coin else []) + ([mmtype] if mmtype else []))
rc = self.chk_data[chksum_key]
ref_chksum = rc if (ftype == 'passwd' or coin) else rc[self.proto.base_coin.lower()][self.proto.testnet]
if pat:
t.expect(pat,regex=True)
t.expect(chksum_pat,regex=True)
m = t.p.match.group(0)
t.read()
cmp_or_die(ref_chksum,m)
return t
def ref_segwitaddrfile_chk(self):
if not 'S' in self.proto.mmtypes:
return skip(f'not supported by {self.proto.cls_name} protocol')
return self.ref_addrfile_chk(ftype='segwitaddr',pat='{}.*Segwit'.format(self.nw_desc))
def ref_bech32addrfile_chk(self):
if not 'B' in self.proto.mmtypes:
return skip(f'not supported by {self.proto.cls_name} protocol')
return self.ref_addrfile_chk(ftype='bech32addr',pat='{}.*Bech32'.format(self.nw_desc))
def ref_keyaddrfile_chk(self):
return self.ref_addrfile_chk(ftype='keyaddr')
def ref_passwdfile_chk(self,key,pat):
return self.ref_addrfile_chk(ftype='passwd',id_key=key,pat=pat)
def ref_passwdfile_chk_b58_20(self): return self.ref_passwdfile_chk(key='b58_20',pat=r'Base58.*len.* 20\b')
def ref_passwdfile_chk_b58_10(self): return self.ref_passwdfile_chk(key='b58_10',pat=r'Base58.*len.* 10\b')
def ref_passwdfile_chk_b32_24(self): return self.ref_passwdfile_chk(key='b32_24',pat=r'Base32.*len.* 24\b')
def ref_passwdfile_chk_b32_12(self): return self.ref_passwdfile_chk(key='b32_12',pat=r'Base32.*len.* 12\b')
def ref_passwdfile_chk_hex_32(self): return self.ref_passwdfile_chk(key='hex_32',pat=r'Hexadec.*len.* 32\b')
def ref_passwdfile_chk_hex_48(self): return self.ref_passwdfile_chk(key='hex_48',pat=r'Hexadec.*len.* 48\b')
def ref_passwdfile_chk_hex_64(self): return self.ref_passwdfile_chk(key='hex_64',pat=r'Hexadec.*len.* 64\b')
def ref_passwdfile_chk_bip39_12(self): return self.ref_passwdfile_chk(key='bip39_12',pat=r'BIP39.*len.* 12\b')
def ref_passwdfile_chk_bip39_18(self): return self.ref_passwdfile_chk(key='bip39_18',pat=r'BIP39.*len.* 18\b')
def ref_passwdfile_chk_bip39_24(self): return self.ref_passwdfile_chk(key='bip39_24',pat=r'BIP39.*len.* 24\b')
def ref_passwdfile_chk_xmrseed_25(self): return self.ref_passwdfile_chk(key='xmrseed_25',pat=r'Mon.*len.* 25\b')
def ref_passwdfile_chk_hex2bip39_12(self): return self.ref_passwdfile_chk(key='hex2bip39_12',pat=r'BIP39.*len.* 12\b')
def ref_tx_chk(self):
fn = self.sources['ref_tx_file'][self.proto.coin.lower()][bool(self.tn_ext)]
if not fn: return
tf = joinpath(ref_dir,self.ref_subdir,fn)
wf = dfl_words_file
self.write_to_tmpfile(pwfile,wpasswd)
pf = joinpath(self.tmpdir,pwfile)
return self.txsign(wf,tf,pf,save=False,has_label=True,view='y')
def ref_brain_chk_spc3(self):
return self.ref_brain_chk(bw_file=ref_bw_file_spc)
def ref_dieroll_chk_seedtruncate(self):
wf = joinpath(ref_dir,'overflow128.b6d')
return self.walletchk(wf,None,sid='8EC6D4A2')
def ref_tool_decrypt(self):
f = joinpath(ref_dir,ref_enc_fn)
if not g.debug_utf8:
disable_debug()
dec_file = joinpath(self.tmpdir,'famous.txt')
t = self.spawn('mmgen-tool', ['-q','decrypt',f,'outfile='+dec_file,'hash_preset=1'])
if not g.debug_utf8:
restore_debug()
t.passphrase('user data',tool_enc_passwd)
t.written_to_file('Decrypted data')
dec_txt = read_from_file(dec_file)
imsg_r(dec_txt)
cmp_or_die(sample_text+'\n',dec_txt) # file adds a newline to sample_text
return t
| gpl-3.0 | -1,095,619,924,337,389,300 | 45 | 119 | 0.688504 | false |
yunojuno/django-expiring-links | tests/settings.py | 1 | 2678 | from distutils.version import StrictVersion
from os import getenv
import django
DJANGO_VERSION = StrictVersion(django.get_version())
assert DJANGO_VERSION >= StrictVersion("2.2")
DEBUG = True
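# Use SQLite when Django ships a built-in JSONField (3.1+); otherwise fall back
# to PostgreSQL, which provides the JSON support needed on older Django versions.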
try:
from django.db.models import JSONField # noqa: F401
DATABASES = {
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "test.db",}
}
except ImportError:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": getenv("TEST_DB_NAME", "request_token"),
"USER": getenv("TEST_DB_USER", "postgres"),
"PASSWORD": getenv("TEST_DB_PASSWORD", "postgres"),
"HOST": getenv("TEST_DB_HOST", "localhost"),
"PORT": getenv("TEST_DB_PORT", "5432"),
}
}
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.sessions",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.staticfiles",
"request_token",
"tests",
)
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"request_token.middleware.RequestTokenMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
# insert your TEMPLATE_DIRS here
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"request_token.context_processors.request_token",
]
},
}
]
ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
SECRET_KEY = "request_token"
ROOT_URLCONF = "tests.urls"
APPEND_SLASH = True
STATIC_URL = "/static/"
STATIC_ROOT = "./static"
TIME_ZONE = "UTC"
SITE_ID = 1
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple",
}
},
"loggers": {"request_token": {"handlers": ["console"], "level": "DEBUG"}},
}
assert DEBUG is True, "This project is only intended to be used for testing."
| mit | 524,550,944,304,967,600 | 25.514851 | 95 | 0.594847 | false |
sabas1080/InstagramPi | InstagramPi.py | 1 | 4484 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# InstagramPi Gafas for Instagram with Raspberry Pi Zero
#
# Author: Andrés Sabas @ Feb 2017
#
#
# Use text editor to edit the script and type in valid Instagram username/password
import atexit
import picamera
import os
import time
import random
from os import listdir
from os.path import isfile, join
from random import randint
import RPi.GPIO as GPIO
from InstagramAPI import InstagramAPI
butOp = 17 # Broadcom pin 17 (mode select button)
butTake = 27 # Broadcom pin 27 (capture button)
ledPin = 16 # Broadcom pin 16
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP)
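# With the internal pull-ups enabled, each button input reads HIGH until the
# button (assumed to be wired to ground) is pressed.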
effects = ['none','negative','solarize','sketch','denoise','emboss','oilpaint','hatch','gpen','pastel','watercolor','film','blur','saturation','colorswap','washedout','posterise','colorpoint','colorbalance','cartoon','deinterlace1','deinterlace2']
saveIdx = -1 # Image index for saving (-1 = none set yet)
Option = True # Capture mode: Photo(True) or Video(False)
# Init camera and set up default values
camera = picamera.PiCamera()
atexit.register(camera.close)
camera.image_effect = effects[0]
#camera.resolution = sizeData[sizeMode][1]
#camera.crop = sizeData[sizeMode][2]
#camera.crop = (0.0, 0.0, 1.0, 1.0)
PhotoPath = "/home/pi/InstagramPi/images" # Change Directory to Folder with Pics that you want to upload
IGUSER = "xhabas" # Change to your Instagram USERNAME
PASSWD = "" # Change to your Instagram Password
#INSTAGRAM_FILE_NAME = "instagram.txt" # Text file to store your password
#if PASSWD == "":
# with open(INSTAGRAM_FILE_NAME, 'r') as f:
# PASSWD = f.readlines()[0];
# Change to your Photo Hashtag
IGCaption = "Hi from Raspberry Pi #PInstagram"
# Change to your Video Hashtag
IGCaptionVideo = "Hi from Raspberry Pi #PInstagram"
def TakeVideo():
os.chdir(PhotoPath)
#Delete previous videos
bashCommand = "rm -rf video.h264 video.avi photothumbnail.JPG"
os.system(bashCommand)
print ("Record Video")
camera.capture("photothumbnail.JPG", format='jpeg',thumbnail=None)
camera.start_recording('video.h264' )
time.sleep(10)
camera.stop_recording()
#Convert video to spectacles effect
# Thanks https://github.com/fabe/spectacles-cli/issues/1
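    # The filter chain scales the clip to 720p, crops a square from it and
    # composites overlay.png in the center of the frame (Spectacles-style look).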
bashCommand = "ffmpeg -i video.h264 -i overlay.png -map 0:a? -filter_complex \"scale=-2:720[rescaled];[rescaled]crop=ih:ih:iw/4:0[crop];[crop]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2\" output.h264"
os.system(bashCommand)
#Convert to format avi
bashCommand = "ffmpeg -f h264 -i output.h264 -c libx264 -an video.avi -y"
os.system(bashCommand)
print ("Now Uploading this Video to instagram")
igapi.uploadVideo("video.avi", thumbnail="photothumbnail.JPG", caption=IGCaptionVideo);
print ("Progress : Done")
#n = randint(600,1200)
#print ("Sleep upload for seconds: " + str(n))
#time.sleep(n)
def TakePhoto():
global saveIdx
print ("Take Photo")
os.chdir(PhotoPath)
ListFiles = [f for f in listdir(PhotoPath) if isfile(join(PhotoPath, f))]
print ("Total Photo in this folder:" + str (len(ListFiles)))
while True:
filename = PhotoPath + '/IMG_' + '%04d' % saveIdx + '.JPG'
if not os.path.isfile(filename): break
saveIdx += 1
if saveIdx > 9999: saveIdx = 0
camera.capture(filename, format='jpeg',thumbnail=None)
for i in range(len(ListFiles)):
photo = ListFiles[i]
print ("Progress :" + str([i+1]) + " of " + str(len(ListFiles)))
print ("Now Uploading this photo to instagram: " + photo)
#igapi.uploadPhoto(photo,caption=IGCaption,upload_id=None)
igapi.uploadPhoto(photo,caption=IGCaption,upload_id=None)
# sleep for random between 600 - 1200s
#n = randint(600,1200)
#print ("Sleep upload for seconds: " + str(n))
#time.sleep(n)
#Start Login and Uploading Photo
igapi = InstagramAPI(IGUSER,PASSWD)
igapi.login() # login
try:
while 1:
if GPIO.input(butTake): # button is released
if Option:
TakePhoto() #Take Photo
else:
TakeVideo() #Take Video
if GPIO.input(butOp):
            Option=True; #Photo mode
else:
            Option=False; #Video mode
except KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:
GPIO.cleanup() # cleanup all GPIO
| agpl-3.0 | -4,242,357,488,178,768,000 | 32.706767 | 247 | 0.662726 | false |
amolenaar/gaphor | gaphor/ui/tests/test_mainwindow.py | 1 | 1370 | import pytest
from gaphor.application import Session
from gaphor.core.modeling import Diagram
from gaphor.ui.abc import UIComponent
from gaphor.ui.event import DiagramOpened
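# Build a Gaphor session containing only the services needed to exercise the main window UI.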
@pytest.fixture
def session():
session = Session(
services=[
"event_manager",
"component_registry",
"element_factory",
"modeling_language",
"properties",
"main_window",
"namespace",
"diagrams",
"toolbox",
"elementeditor",
"export_menu",
"tools_menu",
]
)
yield session
session.shutdown()
def get_current_diagram(session):
return (
session.get_service("component_registry")
.get(UIComponent, "diagrams")
.get_current_diagram()
)
def test_creation(session):
# MainWindow should be created as resource
main_w = session.get_service("main_window")
main_w.open()
assert get_current_diagram(session) is None
def test_show_diagram(session):
element_factory = session.get_service("element_factory")
diagram = element_factory.create(Diagram)
main_w = session.get_service("main_window")
main_w.open()
event_manager = session.get_service("event_manager")
event_manager.handle(DiagramOpened(diagram))
assert get_current_diagram(session) == diagram
| lgpl-2.1 | 7,249,366,273,309,667,000 | 24.849057 | 60 | 0.625547 | false |