import copy
from PyQt5 import QtCore
from PyQt5 import QtGui
import numpy
from sas.qtgui.Plotting.PlotterData import Data1D
from sas.qtgui.Plotting.PlotterData import Data2D
from sas.qtgui.Perspectives.Fitting.AssociatedComboBox import AssociatedComboBox
from sas.sascalc.fit.expression import check_constraints
model_header_captions = ['Parameter', 'Value', 'Min', 'Max', 'Units']
model_header_tooltips = ['Select parameter for fitting',
'Enter parameter value',
'Enter minimum value for parameter',
'Enter maximum value for parameter',
'Unit of the parameter']
poly_header_captions = ['Parameter', 'PD[ratio]', 'Min', 'Max', 'Npts', 'Nsigs',
'Function', 'Filename']
poly_header_tooltips = ['Select parameter for fitting',
'Enter polydispersity ratio (Std deviation/mean).\n'+
'For angles this can be either std deviation or half width (for uniform distributions) in degrees',
'Enter minimum value for parameter',
'Enter maximum value for parameter',
'Enter number of points for parameter',
'Enter number of sigmas parameter',
'Select distribution function',
'Select filename with user-definable distribution']
error_tooltip = 'Error value for fitted parameter'
header_error_caption = 'Error'
def replaceShellName(param_name, value):
"""
Updates parameter name from <param_name>[n_shell] to <param_name>value
"""
assert '[' in param_name
return param_name[:param_name.index('[')]+str(value)
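# Hedged usage sketch (illustrative parameter name, not from the original module):
#
#     replaceShellName("sld[n_shell]", 2)   # -> "sld2"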
def getIterParams(model):
"""
Returns a list of all multi-shell parameters in 'model'
"""
return list([par for par in model.iq_parameters if "[" in par.name])
def getMultiplicity(model):
"""
Finds out if 'model' has multishell parameters.
If so, returns the name of the counter parameter and the number of shells
"""
iter_params = getIterParams(model)
param_name = ""
param_length = 0
if iter_params:
param_length = iter_params[0].length
param_name = iter_params[0].length_control
if param_name is None and '[' in iter_params[0].name:
param_name = iter_params[0].name[:iter_params[0].name.index('[')]
return (param_name, param_length)
def createFixedChoiceComboBox(param, item_row):
"""
Determines whether param is a fixed-choice parameter, modifies items in item_row appropriately and returns a combo
box containing the fixed choices. Returns None if param is not fixed-choice.
item_row is a list of QStandardItem objects for insertion into the parameter table.
"""
# Determine whether this is a fixed-choice parameter. There are lots of conditionals, simply because the
# implementation is not yet concrete; there are several possible indicators that the parameter is fixed-choice.
# TODO: (when the sasmodels implementation is concrete, clean this up)
choices = None
if isinstance(param.choices, (list, tuple)) and len(param.choices) > 0:
# The choices property is concrete in sasmodels, probably will use this
choices = param.choices
elif isinstance(param.units, (list, tuple)):
choices = [str(x) for x in param.units]
cbox = None
if choices is not None:
# Use combo box for input, if it is fixed-choice
cbox = AssociatedComboBox(item_row[1], idx_as_value=True)
cbox.addItems(choices)
        if param.default is not None and param.default < len(choices):
            # the default is an index into the choices list
            cbox.setCurrentIndex(param.default)
item_row[2].setEditable(False)
item_row[3].setEditable(False)
return cbox
def addParametersToModel(parameters, kernel_module, is2D, model=None, view=None):
"""
Update local ModelModel with sasmodel parameters.
Actually appends to model, if model and view params are not None.
Always returns list of lists of QStandardItems.
"""
multishell_parameters = getIterParams(parameters)
multishell_param_name, _ = getMultiplicity(parameters)
if is2D:
params = [p for p in parameters.kernel_parameters if p.type != 'magnetic']
else:
params = parameters.iq_parameters
rows = []
for param in params:
        # don't include the shell-count parameter
        if param.name == multishell_param_name:
            continue
        # skip multi-shell parameters; they are added separately per shell
        if param in multishell_parameters:
            continue
        item_name = param.name
item1 = QtGui.QStandardItem(item_name)
item1.setCheckable(True)
item1.setEditable(False)
# check for polydisp params
if param.polydisperse:
poly_item = QtGui.QStandardItem("Polydispersity")
poly_item.setEditable(False)
poly_item.setSelectable(False)
item1_1 = QtGui.QStandardItem("Distribution")
item1_1.setEditable(False)
item1_1.setSelectable(False)
# Find param in volume_params
poly_pars = copy.deepcopy(parameters.form_volume_parameters)
if is2D:
poly_pars += parameters.orientation_parameters
for p in poly_pars:
if p.name != param.name:
continue
width = kernel_module.getParam(p.name+'.width')
ptype = kernel_module.getParam(p.name+'.type')
item1_2 = QtGui.QStandardItem(str(width))
item1_2.setEditable(False)
item1_2.setSelectable(False)
item1_3 = QtGui.QStandardItem()
item1_3.setEditable(False)
item1_3.setSelectable(False)
item1_4 = QtGui.QStandardItem()
item1_4.setEditable(False)
item1_4.setSelectable(False)
item1_5 = QtGui.QStandardItem(ptype)
item1_5.setEditable(False)
item1_5.setSelectable(False)
poly_item.appendRow([item1_1, item1_2, item1_3, item1_4, item1_5])
break
# Add the polydisp item as a child
item1.appendRow([poly_item])
# Param values
item2 = QtGui.QStandardItem(str(param.default))
item3 = QtGui.QStandardItem(str(param.limits[0]))
item4 = QtGui.QStandardItem(str(param.limits[1]))
item5 = QtGui.QStandardItem(str(param.units))
item5.setEditable(False)
# Check if fixed-choice (returns combobox, if so, also makes some items uneditable)
row = [item1, item2, item3, item4, item5]
cbox = createFixedChoiceComboBox(param, row)
# Append to the model and use the combobox, if required
if None not in (model, view):
model.appendRow(row)
if cbox:
view.setIndexWidget(item2.index(), cbox)
rows.append(row)
return rows
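# Hedged usage sketch (not part of the original module; the attribute access on
# the sasmodels kernel is illustrative and may differ between versions):
#
#     model = QtGui.QStandardItemModel()
#     rows = addParametersToModel(kernel_module._model_info.parameters,
#                                 kernel_module, is2D=False,
#                                 model=model, view=parameter_tree_view)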
def addSimpleParametersToModel(parameters, is2D, parameters_original=None, model=None, view=None, row_num=None):
"""
Update local ModelModel with sasmodel parameters (non-dispersed, non-magnetic)
Actually appends to model, if model and view params are not None.
Always returns list of lists of QStandardItems.
parameters_original: list of parameters before any tagging on their IDs, e.g. for product model (so that those are
the display names; see below)
"""
if is2D:
params = [p for p in parameters.kernel_parameters if p.type != 'magnetic']
else:
params = parameters.iq_parameters
if parameters_original:
# 'parameters_original' contains the parameters as they are to be DISPLAYED, while 'parameters'
# contains the parameters as they were renamed; this is for handling name collisions in product model.
# The 'real name' of the parameter will be stored in the item's user data.
if is2D:
params_orig = [p for p in parameters_original.kernel_parameters if p.type != 'magnetic']
else:
params_orig = parameters_original.iq_parameters
else:
# no difference in names anyway
params_orig = params
rows = []
for param, param_orig in zip(params, params_orig):
# Create the top level, checkable item
item_name = param_orig.name
item1 = QtGui.QStandardItem(item_name)
item1.setData(param.name, QtCore.Qt.UserRole)
item1.setCheckable(False)
item1.setEditable(False)
# Param values
# TODO: add delegate for validation of cells
item2 = QtGui.QStandardItem(str(param.default))
item3 = QtGui.QStandardItem(str(param.limits[0]))
item4 = QtGui.QStandardItem(str(param.limits[1]))
item5 = QtGui.QStandardItem(str(param.units))
item5.setEditable(False)
# Check if fixed-choice (returns combobox, if so, also makes some items uneditable)
row = [item1, item2, item3, item4, item5]
cbox = createFixedChoiceComboBox(param, row)
# Append to the model and use the combobox, if required
if None not in (model, view):
if row_num is None:
model.appendRow(row)
else:
model.insertRow(row_num, row)
row_num += 1
if cbox:
item1.setCheckable(False)
item3.setText("")
item4.setText("")
item3.setEditable(False)
item4.setEditable(False)
view.setIndexWidget(item2.index(), cbox)
else:
item1.setCheckable(True)
rows.append(row)
return rows
def markParameterDisabled(model, row):
"""Given the QModel row number, format to show it is not available for fitting"""
# If an error column is present, there are a total of 6 columns.
items = [model.item(row, c) for c in range(6)]
model.blockSignals(True)
for item in items:
if item is None:
continue
item.setEditable(False)
item.setCheckable(False)
item = items[0]
font = QtGui.QFont()
font.setItalic(True)
item.setFont(font)
item.setForeground(QtGui.QBrush(QtGui.QColor(100, 100, 100)))
item.setToolTip("This parameter cannot be fitted.")
model.blockSignals(False)
def addCheckedListToModel(model, param_list):
"""
Add a QItem to model. Makes the QItem checkable
"""
assert isinstance(model, QtGui.QStandardItemModel)
item_list = [QtGui.QStandardItem(item) for item in param_list]
item_list[0].setCheckable(True)
model.appendRow(item_list)
def addHeadingRowToModel(model, name):
"""adds a non-interactive top-level row to the model"""
header_row = [QtGui.QStandardItem() for i in range(5)]
header_row[0].setText(name)
font = header_row[0].font()
font.setBold(True)
header_row[0].setFont(font)
for item in header_row:
item.setEditable(False)
item.setCheckable(False)
item.setSelectable(False)
model.appendRow(header_row)
def addHeadersToModel(model):
"""
Adds predefined headers to the model
"""
for i, item in enumerate(model_header_captions):
model.setHeaderData(i, QtCore.Qt.Horizontal, item)
model.header_tooltips = copy.copy(model_header_tooltips)
def addErrorHeadersToModel(model):
"""
Adds predefined headers to the model
"""
model_header_error_captions = copy.copy(model_header_captions)
model_header_error_captions.insert(2, header_error_caption)
for i, item in enumerate(model_header_error_captions):
model.setHeaderData(i, QtCore.Qt.Horizontal, item)
model_header_error_tooltips = copy.copy(model_header_tooltips)
model_header_error_tooltips.insert(2, error_tooltip)
model.header_tooltips = copy.copy(model_header_error_tooltips)
def addPolyHeadersToModel(model):
"""
Adds predefined headers to the model
"""
for i, item in enumerate(poly_header_captions):
model.setHeaderData(i, QtCore.Qt.Horizontal, item)
model.header_tooltips = copy.copy(poly_header_tooltips)
def addErrorPolyHeadersToModel(model):
"""
Adds predefined headers to the model
"""
poly_header_error_captions = copy.copy(poly_header_captions)
poly_header_error_captions.insert(2, header_error_caption)
for i, item in enumerate(poly_header_error_captions):
model.setHeaderData(i, QtCore.Qt.Horizontal, item)
poly_header_error_tooltips = copy.copy(poly_header_tooltips)
poly_header_error_tooltips.insert(2, error_tooltip)
model.header_tooltips = copy.copy(poly_header_error_tooltips)
def addShellsToModel(parameters, model, index, row_num=None, view=None):
"""
Find out multishell parameters and update the model with the requested number of them.
Inserts them after the row at row_num, if not None; otherwise, appends to end.
If view param is not None, supports fixed-choice params.
Returns a list of lists of QStandardItem objects.
"""
multishell_parameters = getIterParams(parameters)
rows = []
for i in range(index):
for par in multishell_parameters:
            # Create the name: <param><i>, e.g. "sld1" for parameter "sld[n]"
param_name = replaceShellName(par.name, i+1)
item1 = QtGui.QStandardItem(param_name)
item1.setCheckable(True)
# check for polydisp params
if par.polydisperse:
poly_item = QtGui.QStandardItem("Polydispersity")
item1_1 = QtGui.QStandardItem("Distribution")
# Find param in volume_params
for p in parameters.form_volume_parameters:
if p.name != par.name:
continue
item1_2 = QtGui.QStandardItem(str(p.default))
item1_3 = QtGui.QStandardItem(str(p.limits[0]))
item1_4 = QtGui.QStandardItem(str(p.limits[1]))
item1_5 = QtGui.QStandardItem(str(p.units))
poly_item.appendRow([item1_1, item1_2, item1_3, item1_4, item1_5])
break
item1.appendRow([poly_item])
item2 = QtGui.QStandardItem(str(par.default))
item3 = QtGui.QStandardItem(str(par.limits[0]))
item4 = QtGui.QStandardItem(str(par.limits[1]))
item5 = QtGui.QStandardItem(str(par.units))
item5.setEditable(False)
# Check if fixed-choice (returns combobox, if so, also makes some items uneditable)
row = [item1, item2, item3, item4, item5]
cbox = createFixedChoiceComboBox(par, row)
# Apply combobox if required
if None not in (view, cbox):
# set the min/max cell to be empty
item3.setText("")
item4.setText("")
# Always add to the model
if row_num is None:
model.appendRow(row)
else:
model.insertRow(row_num, row)
row_num += 1
if cbox is not None:
view.setIndexWidget(item2.index(), cbox)
rows.append(row)
return rows
def calculateChi2(reference_data, current_data, weight):
"""
Calculate Chi2 value between two sets of data
"""
if reference_data is None or current_data is None:
return None
    # default mask: use all points
    index = None
# Get data: data I, theory I, and data dI in order
if isinstance(reference_data, Data2D):
if index is None:
index = numpy.ones(len(current_data.data), dtype=bool)
if weight is not None:
current_data.err_data = weight
# get rid of zero error points
index = index & (current_data.err_data != 0)
index = index & (numpy.isfinite(current_data.data))
fn = current_data.data[index]
gn = reference_data.data[index]
en = current_data.err_data[index]
else:
if index is None:
index = numpy.ones(len(current_data.y), dtype=bool)
        if current_data.dy is None or len(current_data.dy) == 0 or weight is None:
            dy = numpy.ones(len(current_data.y))
        else:
            dy = weight
            dy[dy == 0] = 1
fn = current_data.y[index]
gn = reference_data.y
en = dy[index]
x_current = current_data.x
x_reference = reference_data.x
if len(fn) > len(gn):
fn = fn[0:len(gn)]
en = en[0:len(gn)]
else:
try:
y = numpy.zeros(len(current_data.y))
begin = 0
for i, x_value in enumerate(x_reference):
if x_value in x_current:
begin = i
break
end = len(x_reference)
endl = 0
for i, x_value in enumerate(list(x_reference)[::-1]):
if x_value in x_current:
endl = i
break
en = en[begin:end-endl]
y = (fn - gn[begin:end-endl])/en
except ValueError:
# value errors may show up every once in a while for malformed columns,
# just reuse what's there already
pass
# Calculate the residual
try:
res = (fn - gn) / en
except ValueError:
#print "Chi2 calculations: Unmatched lengths %s, %s, %s" % (len(fn), len(gn), len(en))
return None
residuals = res[numpy.isfinite(res)]
chisqr = numpy.average(residuals * residuals)
return chisqr
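# Hedged usage sketch (assumes two compatible Data1D sets; not part of the
# original module). The per-point data uncertainty is the usual weight choice:
#
#     chi2 = calculateChi2(reference_data=data, current_data=theory,
#                          weight=data.dy)
#     if chi2 is None:
#         print("Chi2 could not be computed (missing or mismatched data)")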
def residualsData1D(reference_data, current_data, weights):
"""
Calculate the residuals for difference of two Data1D sets
"""
# temporary default values for index and weight
index = None
weight = None
# 1d theory from model_thread is only in the range of index
    if current_data.dy is None or len(current_data.dy) == 0:
        dy = numpy.ones(len(current_data.y))
    else:
        # use the data uncertainties when they are all non-zero,
        # otherwise fall back on the supplied weights
        if numpy.all(current_data.dy):
            dy = current_data.dy
        else:
            dy = weights
        dy[dy == 0] = 1
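    # note: index is None in this function, so y[index][0] reduces to y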
fn = current_data.y[index][0]
gn = reference_data.y
en = dy[index][0]
# x values
x_current = current_data.x
x_reference = reference_data.x
# build residuals
residuals = Data1D()
if len(fn) == len(gn):
y = (fn - gn)/en
residuals.y = -y
elif len(fn) > len(gn):
residuals.y = -(fn - gn[1:len(fn)])/en
else:
try:
y = numpy.zeros(len(current_data.y))
begin = 0
for i, x_value in enumerate(x_reference):
if x_value in x_current:
begin = i
break
end = len(x_reference)
endl = 0
for i, x_value in enumerate(list(x_reference)[::-1]):
if x_value in x_current:
endl = i
break
en = en[begin:end-endl]
y = (fn - gn[begin:end-endl])/en
residuals.y = -y
except ValueError:
# value errors may show up every once in a while for malformed columns,
# just reuse what's there already
pass
residuals.x = current_data.x[index][0]
residuals.dy = None
residuals.dx = None
residuals.dxl = None
residuals.dxw = None
residuals.ytransform = 'y'
if reference_data.isSesans:
residuals.xtransform = 'x'
residuals.xaxis('\\rm{z} ', 'A')
        # For later scale changes
else:
residuals.xaxis('\\rm{Q} ', 'A^{-1}')
residuals.yaxis('\\rm{Residuals} ', 'normalized')
return residuals
def residualsData2D(reference_data, current_data, weight):
"""
Calculate the residuals for difference of two Data2D sets
"""
    # build residuals
    residuals = Data2D()
    # copy the structure of the current data set, then replace its data
    current_data.clone_without_data(len(current_data.data), residuals)
    residuals.data = None
fn = current_data.data
gn = reference_data.data
if weight is None:
en = current_data.err_data
else:
en = weight
residuals.data = (fn - gn) / en
residuals.qx_data = current_data.qx_data
residuals.qy_data = current_data.qy_data
residuals.q_data = current_data.q_data
residuals.err_data = None
residuals.xmin = min(residuals.qx_data)
residuals.xmax = max(residuals.qx_data)
residuals.ymin = min(residuals.qy_data)
residuals.ymax = max(residuals.qy_data)
residuals.q_data = current_data.q_data
residuals.mask = current_data.mask
residuals.scale = 'linear'
# check the lengths
if len(residuals.data) != len(residuals.q_data):
return None
return residuals
def plotResiduals(reference_data, current_data, weights):
"""
Create Data1D/Data2D with residuals, ready for plotting
"""
data_copy = copy.deepcopy(current_data)
# Get data: data I, theory I, and data dI in order
method_name = current_data.__class__.__name__
residuals_dict = {"Data1D": residualsData1D,
"Data2D": residualsData2D}
try:
residuals = residuals_dict[method_name](reference_data, data_copy, weights)
except ValueError:
return None
theory_name = str(current_data.name.split()[0])
res_name = reference_data.name if reference_data.name else reference_data.filename
residuals.name = "Residuals for " + str(theory_name) + "[" + res_name + "]"
residuals.title = residuals.name
    # when two data sets share the same id, the first one plotted is overridden;
    # append the theory name if separate charts per model are required
residuals.id = "res" + str(reference_data.id) # + str(theory_name)
# group_id specify on which panel to plot this data
group_id = reference_data.group_id
residuals.group_id = "res" + str(group_id)
# Symbol
residuals.symbol = 0
residuals.hide_error = False
return residuals
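# Hedged usage sketch (not part of the original module): dispatch is by class
# name, so reference and current data must both be Data1D or both Data2D.
#
#     res_plot = plotResiduals(reference_data=data, current_data=theory,
#                              weights=data.dy)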
def plotPolydispersities(model):
plots = []
if model is None:
return plots
# test for model being a sasmodels.sasview_model.SasviewModel?
for name in model.dispersion.keys():
xarr, yarr = model.get_weights(name)
if len(xarr) <= 1: # param name not found or no polydisp.
continue
# create Data1D as in residualsData1D() and fill x/y members
# similar to FittingLogic._create1DPlot() but different data/axes
data1d = Data1D(x=xarr, y=yarr)
xunit = model.details[name][0]
data1d.xtransform = 'x'
data1d.ytransform = 'y'
        data1d.xaxis(r'\rm{{{}}}'.format(name.replace('_', r'\_')), xunit)
data1d.yaxis(r'\rm{probability}', 'normalized')
data1d.scale = 'linear'
data1d.symbol = 'Line'
data1d.name = "{} polydispersity".format(name)
data1d.id = data1d.name # placeholder, has to be completed later
data1d.plot_role = Data1D.ROLE_RESIDUAL
plots.append(data1d)
return plots
def binary_encode(i, digits):
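    """
    Return the little-endian binary representation of *i* as a list of
    *digits* bits, e.g. binary_encode(6, 4) -> [0, 1, 1, 0].
    """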
return [i >> d & 1 for d in range(digits)]
def getWeight(data, is2d, flag=None):
"""
Received flag and compute error on data.
:param flag: flag to transform error of data.
"""
weight = None
if data is None:
return []
if is2d:
if not hasattr(data, 'err_data'):
return []
dy_data = data.err_data
data = data.data
else:
if not hasattr(data, 'dy'):
return []
dy_data = data.dy
data = data.y
if flag == 0:
weight = numpy.ones_like(data)
elif flag == 1:
weight = dy_data
elif flag == 2:
weight = numpy.sqrt(numpy.abs(data))
elif flag == 3:
weight = numpy.abs(data)
return weight
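# Hedged usage sketch: the flag convention follows the branches above
# (0: unit weights, 1: dI, 2: sqrt(|I|), 3: |I|).
#
#     w = getWeight(data1d, is2d=False, flag=2)   # sqrt(|I|) weighting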
def updateKernelWithResults(kernel, results):
"""
Takes model kernel and applies results dict to its parameters,
returning the modified (deep) copy of the kernel.
"""
assert isinstance(results, dict)
local_kernel = copy.deepcopy(kernel)
for parameter in results.keys():
# Update the parameter value - note: this supports +/-inf as well
local_kernel.setParam(parameter, results[parameter][0])
return local_kernel
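# Hedged usage sketch (parameter name is illustrative): each results value is a
# (value, error) pair; only the value is written back to the kernel copy.
#
#     fitted_kernel = updateKernelWithResults(kernel, {"radius": (55.2, 0.8)})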
def getStandardParam(model=None):
"""
Returns a list with standard parameters for the current model
"""
param = []
num_rows = model.rowCount()
if num_rows < 1:
return param
for row in range(num_rows):
param_name = model.item(row, 0).text()
checkbox_state = model.item(row, 0).checkState() == QtCore.Qt.Checked
value = model.item(row, 1).text()
column_shift = 0
if model.columnCount() == 5: # no error column
error_state = False
error_value = 0.0
else:
error_state = True
error_value = model.item(row, 2).text()
column_shift = 1
min_state = True
max_state = True
min_value = model.item(row, 2+column_shift).text()
max_value = model.item(row, 3+column_shift).text()
unit = ""
if model.item(row, 4+column_shift) is not None:
u = model.item(row, 4+column_shift).text()
# This isn't a unit if it is a number (polyd./magn.)
unit = "" if isNumber(u) else u
param.append([checkbox_state, param_name, value, "",
[error_state, error_value],
[min_state, min_value],
[max_state, max_value], unit])
return param
def isNumber(s):
"""
Checks if string 's' is an int/float
"""
if s.isdigit():
# check int
return True
else:
try:
# check float
_ = float(s)
except ValueError:
return False
return True
def getOrientationParam(kernel_module=None):
"""
Get the dictionary with orientation parameters
"""
param = []
if kernel_module is None:
return None
for param_name in list(kernel_module.params.keys()):
name = param_name
value = kernel_module.params[param_name]
min_state = True
max_state = True
error_state = False
error_value = 0.0
checkbox_state = True #??
        details = kernel_module.details[param_name]  # [unit, min, max]
param.append([checkbox_state, name, value, "",
[error_state, error_value],
[min_state, details[1]],
[max_state, details[2]], details[0]])
return param
def formatParameters(parameters, Check=True):
"""
Prepare the parameter string in the standard SasView layout
"""
assert parameters is not None
assert isinstance(parameters, list)
output_string = "sasview_parameter_values:"
for parameter in parameters:
# recast tuples into strings
parameter = [str(p) for p in parameter]
output_string += ",".join([p for p in parameter if p is not None])
output_string += ":"
    if not Check:
        # expanded form: one parameter per line
        return output_string.replace(':', '\n')
    return output_string
def formatParametersExcel(parameters):
"""
Prepare the parameter string in the Excel format (tab delimited)
"""
assert parameters is not None
assert isinstance(parameters, list)
crlf = chr(13) + chr(10)
tab = chr(9)
output_string = ""
# names
names = ""
values = ""
check = ""
for parameter in parameters:
names += parameter[0]+tab
# Add the error column if fitted
if parameter[1] == "True" and parameter[3] is not None:
names += parameter[0]+"_err"+tab
values += parameter[2]+tab
check += parameter[1]+tab
if parameter[1] == "True" and parameter[3] is not None:
values += parameter[3]+tab
# add .npts and .nsigmas when necessary
if parameter[0][-6:] == ".width":
names += parameter[0].replace('.width', '.nsigmas') + tab
names += parameter[0].replace('.width', '.npts') + tab
values += parameter[5] + tab + parameter[4] + tab
output_string = names + crlf + values + crlf + check
return output_string
def formatParametersLatex(parameters):
"""
Prepare the parameter string in latex
"""
assert parameters is not None
assert isinstance(parameters, list)
output_string = r'\begin{table}'
output_string += r'\begin{tabular}[h]'
crlf = chr(13) + chr(10)
output_string += '{|'
output_string += 'l|l|'*len(parameters)
output_string += r'}\hline'
output_string += crlf
for index, parameter in enumerate(parameters):
name = parameter[0] # Parameter name
output_string += name.replace('_', r'\_') # Escape underscores
# Add the error column if fitted
if parameter[1] == "True" and parameter[3] is not None:
output_string += ' & '
output_string += parameter[0]+r'\_err'
if index < len(parameters) - 1:
output_string += ' & '
# add .npts and .nsigmas when necessary
if parameter[0][-6:] == ".width":
output_string += parameter[0].replace('.width', '.nsigmas') + ' & '
output_string += parameter[0].replace('.width', '.npts')
if index < len(parameters) - 1:
output_string += ' & '
output_string += r'\\ \hline'
output_string += crlf
# Construct row of values and errors
for index, parameter in enumerate(parameters):
output_string += parameter[2]
if parameter[1] == "True" and parameter[3] is not None:
output_string += ' & '
output_string += parameter[3]
if index < len(parameters) - 1:
output_string += ' & '
# add .npts and .nsigmas when necessary
if parameter[0][-6:] == ".width":
output_string += parameter[5] + ' & '
output_string += parameter[4]
if index < len(parameters) - 1:
output_string += ' & '
output_string += r'\\ \hline'
output_string += crlf
output_string += r'\end{tabular}'
output_string += r'\end{table}'
return output_string
def isParamPolydisperse(param_name, kernel_params, is2D=False):
"""
Simple lookup for polydispersity for the given param name
"""
parameters = kernel_params.form_volume_parameters
if is2D:
parameters += kernel_params.orientation_parameters
has_poly = False
for param in parameters:
if param.name==param_name and param.polydisperse:
has_poly = True
break
return has_poly
def checkConstraints(symtab, constraints):
# type: (Dict[str, float], Sequence[Tuple[str, str]]) -> str
"""
Compile and evaluate the constraints in the context of the initial values
and return the list of errors.
Errors are returned as an html string where errors are tagged with <b>
markups:
Unknown symbol: tags unknown symbols in *constraints*
Syntax error: tags the beginning of a syntax error in *constraints*
Cyclic dependency: tags comma separated parameters that have
cyclic dependency
The errors are wrapped in a <div class = "error"> and a style header is
added
"""
# Note: dict(constraints) will choose the latest definition if
# there are duplicates.
errors = "<br>".join(check_constraints(symtab, dict(constraints),
html=True))
# wrap everything in <div class = "error">
if errors:
errors = "<div class = \"error\">" + errors + "</div>"
header = "<style type=\"text/css\"> div.error b { "\
"font-weight: normal; color:red;}</style>"
return header + errors
    else:
        return ""
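# Hedged usage sketch (symbol names are illustrative):
#
#     symtab = {"M1.radius": 20.0, "M1.length": 400.0}
#     constraints = [("M1.length", "2 * M1.radius")]
#     errors = checkConstraints(symtab, constraints)   # "" when all is valid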
# ---------------------------------------------------------------------------
# Source: /sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Perspectives/Fitting/FittingUtilities.py (pypi)
# ---------------------------------------------------------------------------
import numpy as np
# defaults in cgs unit
_SAMPLE_A_SIZE = [1.27]
_SOURCE_A_SIZE = [3.81]
_SAMPLE_DISTANCE = [1627, 0]
_SAMPLE_OFFSET = [0, 0]
_SAMPLE_SIZE = [2.54]
_SAMPLE_THICKNESS = 0.2
_D_DISTANCE = [1000, 0]
_D_SIZE = [128, 128]
_D_PIX_SIZE = [0.5, 0.5]
_MIN = 0.0
_MAX = 50.0
_INTENSITY = 368428
_WAVE_LENGTH = 6.0
_WAVE_SPREAD = 0.125
_MASS = 1.67492729E-24  # [g]
_LAMBDA_ARRAY = [[0, 1e+16], [_INTENSITY, _INTENSITY]]
class Aperture(object):
"""
An object class that defines the aperture variables
"""
def __init__(self):
# assumes that all aligned at the centers
# aperture_size [diameter] for pinhole, [dx, dy] for rectangular
self.sample_size = _SAMPLE_A_SIZE
self.source_size = _SOURCE_A_SIZE
self.sample_distance = _SAMPLE_DISTANCE
def set_source_size(self, size=[]):
"""
Set the source aperture size
"""
if len(size) == 0:
self.source_size = 0.0
else:
self.source_size = size
validate(size[0])
def set_sample_size(self, size=[]):
"""
Set the sample aperture size
"""
if len(size) == 0:
self.sample_size = 0.0
else:
self.sample_size = size
validate(size[0])
def set_sample_distance(self, distance=[]):
"""
Set the sample aperture distance
"""
if len(distance) == 0:
self.sample_distance = 0.0
else:
self.sample_distance = distance
validate(distance[0])
class Sample(object):
"""
An object class that defines the sample variables
"""
def __init__(self):
# assumes that all aligned at the centers
# source2sample or sample2detector distance
self.distance = _SAMPLE_OFFSET
self.size = _SAMPLE_SIZE
self.thickness = _SAMPLE_THICKNESS
def set_size(self, size=[]):
"""
Set the sample size
"""
if len(size) == 0:
self.size = 0.0
else:
self.size = size
validate(size[0])
def set_thickness(self, thickness=0.0):
"""
Set the sample thickness
"""
self.thickness = thickness
validate(thickness)
def set_distance(self, distance=[]):
"""
Set the sample distance
"""
if len(distance) == 0:
self.distance = 0.0
else:
self.distance = distance
if distance[0] != 0.0:
validate(distance[0])
class Detector(object):
"""
An object class that defines the detector variables
"""
def __init__(self):
# assumes that all aligned at the centers
# source2sample or sample2detector distance
self.distance = _D_DISTANCE
self.size = _D_SIZE
self.pix_size = _D_PIX_SIZE
def set_size(self, size=[]):
"""
Set the detector size
"""
if len(size) == 0:
self.size = 0
else:
# TODO: Make sure detector size is number of pixels
# Could be detector dimensions in e.g., mm, but
# the resolution calculator assumes it is pixels.
# Being pixels, it has to be integers rather than float
self.size = [int(s) for s in size]
validate(size[0])
def set_pix_size(self, size=[]):
"""
Set the detector pix_size
"""
if len(size) == 0:
self.pix_size = 0
else:
self.pix_size = size
validate(size[0])
def set_distance(self, distance=[]):
"""
Set the detector distance
"""
if len(distance) == 0:
self.distance = 0
else:
self.distance = distance
validate(distance[0])
class Neutron(object):
"""
An object that defines the wavelength variables
"""
def __init__(self):
# neutron mass in cgs unit
self.mass = _MASS
# wavelength
self.wavelength = _WAVE_LENGTH
# wavelength spread (FWHM)
self.wavelength_spread = _WAVE_SPREAD
# wavelength spectrum
self.spectrum = self.get_default_spectrum()
# intensity in counts/sec
self.intensity = np.interp(self.wavelength,
self.spectrum[0],
self.spectrum[1],
0.0,
0.0)
# min max range of the spectrum
self.min = min(self.spectrum[0])
self.max = max(self.spectrum[0])
# wavelength band
self.band = [self.min, self.max]
# default unit of the thickness
self.wavelength_unit = 'A'
    def set_full_band(self):
        """
        Reset the band to the full range of the spectrum
        """
        self.band = [self.min, self.max]
def set_spectrum(self, spectrum):
"""
Set spectrum
:param spectrum: numpy array
"""
self.spectrum = spectrum
self.setup_spectrum()
def setup_spectrum(self):
"""
To set the wavelength spectrum, and intensity, assumes
wavelength is already within the spectrum
"""
spectrum = self.spectrum
intensity = np.interp(self.wavelength,
spectrum[0],
spectrum[1],
0.0,
0.0)
self.set_intensity(intensity)
# min max range of the spectrum
self.min = min(self.spectrum[0])
self.max = max(self.spectrum[0])
# set default band
self.set_band([self.min, self.max])
def set_band(self, band=[]):
"""
To set the wavelength band
:param band: array of [min, max]
"""
# check if the wavelength is in range
if min(band) < self.min or max(band) > self.max:
raise ValueError("band out of range")
self.band = band
def set_intensity(self, intensity=368428):
"""
Sets the intensity in counts/sec
"""
self.intensity = intensity
validate(intensity)
def set_wavelength(self, wavelength=_WAVE_LENGTH):
"""
Sets the wavelength
"""
# check if the wavelength is in range
if wavelength < min(self.band) or wavelength > max(self.band):
raise ValueError("wavelength out of range")
self.wavelength = wavelength
validate(wavelength)
self.intensity = np.interp(self.wavelength,
self.spectrum[0],
self.spectrum[1],
0.0,
0.0)
def set_mass(self, mass=_MASS):
"""
        Sets the neutron mass
"""
self.mass = mass
validate(mass)
def set_wavelength_spread(self, spread=_WAVE_SPREAD):
"""
Sets the wavelength spread
"""
self.wavelength_spread = spread
if spread != 0.0:
validate(spread)
def get_intensity(self):
"""
To get the value of intensity
"""
return self.intensity
def get_wavelength(self):
"""
To get the value of wavelength
"""
return self.wavelength
def get_mass(self):
"""
To get the neutron mass
"""
return self.mass
def get_wavelength_spread(self):
"""
To get the value of wavelength spread
"""
return self.wavelength_spread
    def get_ramdom_value(self):
        """
        To get a random wavelength value (currently the nominal wavelength)
        """
        return self.wavelength
def get_spectrum(self):
"""
To get the wavelength spectrum
"""
return self.spectrum
def get_default_spectrum(self):
"""
get default spectrum
"""
return np.array(_LAMBDA_ARRAY)
def get_band(self):
"""
To get the wavelength band
"""
return self.band
def plot_spectrum(self):
"""
        To plot the wavelength spectrum
: requirement: matplotlib.pyplot
"""
try:
import matplotlib.pyplot as plt
plt.plot(self.spectrum[0], self.spectrum[1], linewidth=2, color='r')
plt.legend(['Spectrum'], loc='best')
plt.show()
        except ImportError:
            raise RuntimeError("Can't import matplotlib required to plot...")
class TOF(Neutron):
"""
TOF: make list of wavelength and wave length spreads
"""
def __init__(self):
"""
Init
"""
Neutron.__init__(self)
#self.switch = switch
self.wavelength_list = [self.wavelength]
self.wavelength_spread_list = [self.wavelength_spread]
self.intensity_list = self.get_intensity_list()
def get_intensity_list(self):
"""
get list of the intensity wrt wavelength_list
"""
out = np.interp(self.wavelength_list,
self.spectrum[0],
self.spectrum[1],
0.0,
0.0)
return out
def get_wave_list(self):
"""
Get wavelength and wavelength_spread list
"""
return self.wavelength_list, self.wavelength_spread_list
def set_wave_list(self, wavelength=[]):
"""
Set wavelength list
:param wavelength: list of wavelengths
"""
self.wavelength_list = wavelength
def set_wave_spread_list(self, wavelength_spread=[]):
"""
Set wavelength_spread list
:param wavelength_spread: list of wavelength spreads
"""
self.wavelength_spread_list = wavelength_spread
def validate(value=None):
    """
    Check whether the value is a float >= 0.0
    :return: True / False
    """
    try:
        return float(value) >= 0
    except (TypeError, ValueError):
        return False
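# Hedged usage sketch (not part of the original module): build the default
# neutron description and narrow the usable wavelength band.
#
#     neutron = Neutron()
#     neutron.set_band([2.0, 10.0])   # must lie within the spectrum range
#     neutron.set_wavelength(6.0)     # must lie within the band
#     print(neutron.get_intensity())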
# ---------------------------------------------------------------------------
# Source: /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/calculator/instrument.py (pypi)
# ---------------------------------------------------------------------------
class SlitlengthCalculator(object):
    """
    Compute the slit length from a SAXSess beam profile
    (1st col. Q, 2nd col. I, 3rd col. dI; the 3rd column is not needed)
    """
def __init__(self):
# x data
self.x = None
# y data
self.y = None
# default slit length
self.slit_length = 0.0
# The unit is unknown from SAXSess profile:
# It seems 1/nm but it could be not fixed,
# so users should be notified to determine the unit by themselves.
self.slit_length_unit = "unknown"
def set_data(self, x=None, y=None):
"""
Receive two vector x, y and prepare the slit calculator for
computation.
:param x: array
:param y: array
"""
self.x = x
self.y = y
def calculate_slit_length(self):
"""
Calculate slit length.
:return: the slit length calculated value.
"""
# None data do nothing
if self.y is None or self.x is None:
return
# set local variable
y = self.y
x = self.x
# find max y
max_y = y.max()
# initial values
y_sum = 0.0
y_max = 0.0
ind = 0
        # sum at least 10 y values, continuing until max_y has been reached
while True:
if ind >= 10 and y_max == max_y:
break
y_sum = y_sum + y[ind]
if y[ind] > y_max:
y_max = y[ind]
ind += 1
# find the average value/2 of the top values
y_half = y_sum/(2.0*ind)
# defaults
y_half_d = 0.0
ind = 0
# find indices where it crosses y = y_half.
while True:
# no need to check when ind == 0
ind += 1
# y value and ind just after passed the spot of the half height
y_half_d = y[ind]
if y[ind] < y_half:
break
# y value and ind just before passed the spot of the half height
y_half_u = y[ind - 1]
# get corresponding x values
x_half_d = x[ind]
x_half_u = x[ind - 1]
# calculate x at y = y_half using linear interpolation
if y_half_u == y_half_d:
x_half = (x_half_d + x_half_u)/2.0
else:
x_half = ((x_half_u * (y_half - y_half_d)
+ x_half_d * (y_half_u - y_half))
/ (y_half_u - y_half_d))
# Our slit length is half width, so just give half beam value
slit_length = x_half
# set slit_length
self.slit_length = slit_length
return self.slit_length
def get_slit_length_unit(self):
"""
:return: the slit length unit.
"""
return self.slit_length_unit
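# Hedged usage sketch (synthetic half-Gaussian profile, peak at Q=0; not part
# of the original module):
#
#     import numpy as np
#     calc = SlitlengthCalculator()
#     q = np.linspace(0.0, 1.0, 201)
#     calc.set_data(x=q, y=np.exp(-0.5 * (q / 0.2) ** 2))
#     print(calc.calculate_slit_length())   # roughly the half-width at half-height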
# ---------------------------------------------------------------------------
# Source: /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/calculator/slit_length_calculator.py (pypi)
# ---------------------------------------------------------------------------
import sys
class Results(object):
"""
Class to hold the inversion output parameters
as a function of D_max
"""
def __init__(self):
"""
Initialization. Create empty arrays
and dictionary of labels.
"""
# Array of output for each inversion
self.chi2 = []
self.osc = []
self.pos = []
self.pos_err = []
self.rg = []
self.iq0 = []
self.bck = []
self.d_max = []
## List of errors found during the last exploration
self.errors = []
class DistExplorer(object):
"""
The explorer class
"""
def __init__(self, pr_state):
"""
Initialization.
:param pr_state: sas.sascalc.pr.invertor.Invertor object
"""
self.pr_state = pr_state
self._default_min = 0.8 * self.pr_state.d_max
self._default_max = 1.2 * self.pr_state.d_max
def __call__(self, dmin=None, dmax=None, npts=10):
"""
Compute the outputs as a function of D_max.
:param dmin: minimum value for D_max
:param dmax: maximum value for D_max
:param npts: number of points for D_max
"""
# Take care of the defaults if needed
if dmin is None:
dmin = self._default_min
if dmax is None:
dmax = self._default_max
# Results object to store the computation outputs.
results = Results()
# Loop over d_max values
for i in range(npts):
d = dmin + i * (dmax - dmin) / (npts - 1.0)
self.pr_state.d_max = d
try:
out, cov = self.pr_state.invert(self.pr_state.nfunc)
# Store results
iq0 = self.pr_state.iq0(out)
rg = self.pr_state.rg(out)
pos = self.pr_state.get_positive(out)
pos_err = self.pr_state.get_pos_err(out, cov)
osc = self.pr_state.oscillations(out)
results.d_max.append(self.pr_state.d_max)
results.bck.append(self.pr_state.background)
results.chi2.append(self.pr_state.chi2)
results.iq0.append(iq0)
results.rg.append(rg)
results.pos.append(pos)
results.pos_err.append(pos_err)
results.osc.append(osc)
except Exception as exc:
# This inversion failed, skip this D_max value
msg = "ExploreDialog: inversion failed for "
msg += "D_max=%s\n %s" % (str(d), exc)
results.errors.append(msg)
return results
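# Hedged usage sketch (assumes a configured Invertor with loaded data):
#
#     from sas.sascalc.pr.invertor import Invertor
#     invertor = Invertor()
#     # ... load data, set invertor.d_max, invertor.nfunc, etc. ...
#     explorer = DistExplorer(invertor)
#     results = explorer(npts=25)   # scans 0.8*d_max .. 1.2*d_max by default
#     best_d = results.d_max[results.chi2.index(min(results.chi2))]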
# ---------------------------------------------------------------------------
# Source: /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/pr/distance_explorer.py (pypi)
# ---------------------------------------------------------------------------
from __future__ import division
import sys
import math
import time
import copy
import os
import re
import logging
from functools import reduce
import numpy as np
from numpy import pi
try:
from numba import njit
except ImportError:
#Identity decorator for njit which ignores type signature.
njit = lambda *args, **kw: (lambda x: x)
@njit('f8[:](f8, u8, f8[:])')
def ortho(d_max, n, r):
"""
Orthogonal Functions:
B(r) = 2r sin(pi*nr/d)
:param d_max: d_max.
:param n: n.
    :param r: r-values at which to evaluate B(r).
    :return: B(r).
"""
return (2.0 * r) * np.sin((pi*n/d_max)*r)
#TODO: unused?
@njit('f8[:](f8, u8, f8[:])')
def ortho_derived(d_max, n, r):
"""
    First derivative of the orthogonal function, dB(r)/dr.
:param d_max: d_max.
:param n: n.
:return: First derivative in dB(r)/dr.
"""
    pinr = (pi * n / d_max) * r
    # chain rule: d/dr [2r sin(kr)] = 2 sin(kr) + 2rk cos(kr), with k = pi*n/d_max
    return 2.0 * np.sin(pinr) + 2.0 * r * (pi * n / d_max) * np.cos(pinr)
@njit('f8[:](f8[:], f8, f8[:])')
def pr(pars, d_max, r):
"""
P(r) calculated from the expansion
:param pars: c-parameters.
:param d_max: d_max.
:param r: r-value to evaluate P(r).
:return: P(r).
"""
total = np.zeros(len(r))
for i, pars_i in enumerate(pars):
total += pars_i * ortho(d_max, i+1, r)
return total
@njit('f8[:, :](f8[:], f8[:,:], f8, f8[:])')
def pr_err(pars, err, d_max, r):
"""
P(r) calculated from the expansion,
with errors.
:param pars: c-parameters.
    :param err: covariance matrix of the c-parameters.
    :param d_max: d_max.
    :param r: r-values.
:return: [P(r), dP(r)].
"""
total = np.zeros(len(r))
total_err = np.zeros(len(r))
for i, pars_i in enumerate(pars):
func_values = ortho(d_max, i+1, r)
total += pars_i * func_values
total_err += err[i, i] * (func_values ** 2)
pr_value = total
pr_value_err = np.zeros(len(total_err))
index_greater = total_err > 0
index_less = total_err <= 0
pr_value_err[index_greater] = np.sqrt(total_err[index_greater])
pr_value_err[index_less] = total[index_less]
#pr_value_err = np.sqrt(total_err) if (total_err > 0) else total
ret = np.zeros((2, len(r)), dtype=np.float64)
ret[0, :] = pr_value
ret[1, :] = pr_value_err
return ret
@njit('f8[:](f8, f8, f8[:])')
def dprdr_calc(i, d_max, r):
return 2.0*(np.sin(pi*(i+1)*r/d_max) + pi*(i+1)*r/d_max * np.cos(pi*(i+1)*r/d_max))
@njit('f8[:](f8[:], f8, f8[:])')
def dprdr(pars, d_max, r):
"""
dP(r)/dr calculated from the expansion.
:param pars: c-parameters.
:param d_max: d_max.
:param r: r-value.
:return: dP(r)/dr.
"""
total = np.zeros(len(r))
for i, pars_i in enumerate(pars):
total += pars_i * dprdr_calc(i, d_max, r)
return total
@njit('f8[:](f8[:], f8, i8)')
def ortho_transformed(q, d_max, n):
"""
Fourier transform of the nth orthogonal function.
:param q: q (vector).
:param d_max: d_max.
:param n: n.
:return: Fourier transform of nth orthogonal function across all q.
"""
qd = q * (d_max/pi)
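    # note: np.sinc(x) = sin(pi*x)/(pi*x), so with qd = q*d_max/pi the sinc
    # term below equals sin(q*d_max)/(q*d_max)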
return ( 8.0 * d_max**2 * n * (-1.0)**(n+1) ) * np.sinc(qd) / (n**2 - qd**2)
@njit('f8[:](f8[:], f8, i8, f8, f8, u8)')
def ortho_transformed_smeared(q, d_max, n, height, width, npts):
"""
Slit-smeared Fourier transform of the nth orthogonal function.
Smearing follows Lake, Acta Cryst. (1967) 23, 191.
:param q: q (vector).
:param d_max: d_max.
:param n: n.
:param height: slit_height.
:param width: slit_width.
:param npts: npts.
:return: Slit-smeared Fourier transform of nth orthogonal function across all q.
"""
n_width = npts if width > 0 else 1
n_height = npts if height > 0 else 1
dz = height/(npts-1)
y0, dy = -0.5*width, width/(npts-1)
total = np.zeros(len(q), dtype=np.float64)
for j in range(n_height):
zsq = (j * dz)**2
for i in range(n_width):
y = y0 + i*dy
qsq = (q - y)**2 + zsq
total += ortho_transformed(np.sqrt(qsq), d_max, n)
return total / (n_width*n_height)
@njit('f8[:](f8[:], f8[:], f8, f8, f8, u8)')
def iq_smeared(p, q, d_max, height, width, npts):
"""
Scattering intensity calculated from the expansion, slit-smeared.
:param p: c-parameters.
    :param q: q (vector).
    :param d_max: d_max.
:param height: slit_height.
:param width: slit_width.
:param npts: npts.
:return: Scattering intensity from the expansion slit-smeared across all q.
"""
size_q = len(q)
total = np.zeros(size_q, dtype=np.float64)
for i, p_i in enumerate(p):
total += p_i * ortho_transformed_smeared(q, d_max, i+1, height, width, npts)
return total
@njit('f8[:](f8[:], f8, f8[:])')
def iq(pars, d_max, q):
"""
Scattering intensity calculated from the expansion.
:param pars: c-parameters.
:param d_max: d_max.
:param q: q (vector).
:return: Scattering intensity from the expansion across all q.
"""
total = np.zeros(len(q), dtype=np.float64)
for i, pars_i in enumerate(pars):
total += pars_i * ortho_transformed(q, d_max, i+1)
return total
@njit('f8(f8[:], f8, u8)')
def reg_term(pars, d_max, nslice):
"""
Regularization term calculated from the expansion.
:param pars: c-parameters.
:param d_max: d_max.
:param nslice: nslice.
:return: Regularization term calculated from the expansion.
"""
dx = d_max/nslice
r = np.linspace(0., d_max - dx, nslice)
deriv = dprdr(pars, d_max, r)
total = np.sum(deriv ** 2)
return total*dx
@njit('f8(f8[:], f8, u8)')
def int_pr_square(pars, d_max, nslice):
"""
Regularization term calculated from the expansion.
:param pars: c-parameters.
:param d_max: d_max.
:param nslice: nslice.
:return: Regularization term calculated from the expansion.
"""
dx = d_max/nslice
r = np.linspace(0., d_max - dx, nslice)
values = pr(pars, d_max, r)
total = np.sum(values ** 2)
return total * dx
@njit('f8(f8[:], f8, u8)')
def int_pr(pars, d_max, nslice):
"""
Integral of P(r).
:param pars: c-parameters.
:param d_max: d_max.
:param nslice: nslice.
:return: Integral of P(r).
"""
dx = d_max/nslice
r = np.linspace(0., d_max - dx, nslice)
values = pr(pars, d_max, r)
total = np.sum(values)
return total * dx
@njit('u8(f8[:], f8, u8)')
def npeaks(pars, d_max, nslice):
"""
Get the number of P(r) peaks.
:param pars: c-parameters.
:param d_max: d_max.
:param nslice: nslice.
:return: Number of P(r) peaks.
"""
dx = d_max/nslice
r = np.linspace(0., d_max - dx, nslice)
values = pr(pars, d_max, r)
# Build an index vector with True for ascending and false for flat or descending
pos = values[:-1] < values[1:]
# Count the number of slope changes
count = np.sum((pos[:-1] != pos[1:]) & pos[:-1])
# Check if starting descending or ending ascending
count += 1 - pos[0] + pos[-1]
return count
@njit('f8(f8[:], f8, u8)')
def positive_integral(pars, d_max, nslice):
"""
Get the fraction of the integral of P(r) over the whole
range of r that is above 0.
A valid P(r) is defined as being positive for all r.
:param pars: c-parameters.
:param d_max: d_max.
:param nslice: nslice.
:return: The fraction of the integral of P(r) over the whole
range of r that is above 0.
"""
dx = d_max/nslice
r = np.linspace(0., d_max - dx, nslice)
values = pr(pars, d_max, r)
total = np.sum(np.fabs(values))
total_pos = np.sum(values[values > 0.0])
return total_pos / total # dx cancels
@njit('f8(f8[:], f8[:,:], f8, u8)')
def positive_errors(pars, err, d_max, nslice):
"""
Get the fraction of the integral of P(r) over the whole range
of r that is at least one sigma above 0.
:param pars: c-parameters.
:param err: error terms.
:param d_max: d_max.
:param nslice: nslice.
:return: The fraction of the integral of P(r) over the whole range
of r that is at least one sigma above 0.
"""
dx = d_max/nslice
r = np.linspace(0., d_max - dx, nslice)
pr_vals = pr_err(pars, err, d_max, r)
pr_val = pr_vals[0, :]
pr_val_err = pr_vals[1, :]
total = np.sum(np.fabs(pr_val))
index = pr_val > pr_val_err
total_pos = np.sum(pr_val[index])
return total_pos / total # dx cancels
@njit('f8(f8[:], f8, u8)')
def rg(pars, d_max, nslice):
"""
R_g radius of gyration calculation
R_g**2 = integral[r**2 * p(r) dr] / (2.0 * integral[p(r) dr])
:param pars: c-parameters.
:param d_max: d_max.
:param nslice: nslice.
:return: R_g radius of gyration.
"""
dx = d_max/nslice
r = np.linspace(0., d_max - dx, nslice)
values = pr(pars, d_max, r)
total = np.sum(values)
total_r2 = np.sum((r ** 2) * values)
return np.sqrt(total_r2 / (2.0*total)) # dx cancels
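# Hedged usage sketch (synthetic coefficients; not part of the original module):
#
#     d_max = 100.0
#     c = np.array([1.0])                # single expansion coefficient
#     r = np.linspace(0.0, d_max, 51)
#     p_of_r = pr(c, d_max, r)           # P(r) = 2r*sin(pi*r/d_max)
#     print(rg(c, d_max, 1000))          # numerical radius of gyration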
# ---------------------------------------------------------------------------
# Source: /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/pr/calc.py (pypi)
# ---------------------------------------------------------------------------
import copy
import numpy as np
from sas.sascalc.calculator.BaseComponent import BaseComponent
class MultiplicationModel(BaseComponent):
r"""
Use for P(Q)\*S(Q); function call must be in the order of P(Q) and then S(Q):
The model parameters are combined from both models, P(Q) and S(Q), except 1) 'radius_effective' of S(Q)
which will be calculated from P(Q) via calculate_ER(),
and 2) 'scale' in P model which is synchronized w/ volfraction in S
then P*S is multiplied by a new parameter, 'scale_factor'.
    The polydispersity is applicable only to P(Q), not to S(Q).
.. note:: P(Q) refers to 'form factor' model while S(Q) does to 'structure factor'.
"""
    def __init__(self, p_model, s_model):
        """
        :param p_model: form factor, P(Q)
        :param s_model: structure factor, S(Q)
        """
        BaseComponent.__init__(self)
## Setting model name model description
self.description = ""
self.name = p_model.name +" * "+ s_model.name
self.description= self.name + "\n"
self.fill_description(p_model, s_model)
## Define parameters
self.params = {}
## Parameter details [units, min, max]
self.details = {}
## Define parameters to exclude from multiplication model
self.excluded_params={'radius_effective','scale','background'}
##models
self.p_model = p_model
self.s_model = s_model
self.magnetic_params = []
## dispersion
self._set_dispersion()
## Define parameters
self._set_params()
## New parameter:Scaling factor
self.params['scale_factor'] = 1
self.params['background'] = 0
## Parameter details [units, min, max]
self._set_details()
self.details['scale_factor'] = ['', 0.0, np.inf]
self.details['background'] = ['',-np.inf,np.inf]
#list of parameter that can be fitted
self._set_fixed_params()
## parameters with orientation
for item in self.p_model.orientation_params:
self.orientation_params.append(item)
for item in self.p_model.magnetic_params:
self.magnetic_params.append(item)
for item in self.s_model.orientation_params:
if not item in self.orientation_params:
self.orientation_params.append(item)
# get multiplicity if model provide it, else 1.
try:
multiplicity = p_model.multiplicity
except AttributeError:
multiplicity = 1
## functional multiplicity of the model
self.multiplicity = multiplicity
# non-fittable parameters
self.non_fittable = p_model.non_fittable
self.multiplicity_info = []
self.fun_list = []
if self.non_fittable > 1:
try:
self.multiplicity_info = p_model.multiplicity_info
self.fun_list = p_model.fun_list
self.is_multiplicity_model = True
except AttributeError:
pass
else:
self.is_multiplicity_model = False
self.multiplicity_info = [0]
def _clone(self, obj):
"""
Internal utility function to copy the internal data members to a
fresh copy.
"""
obj.params = copy.deepcopy(self.params)
obj.description = copy.deepcopy(self.description)
obj.details = copy.deepcopy(self.details)
obj.dispersion = copy.deepcopy(self.dispersion)
obj.p_model = self.p_model.clone()
obj.s_model = self.s_model.clone()
#obj = copy.deepcopy(self)
return obj
def _set_dispersion(self):
"""
combine the two models' dispersions. Polydispersity should not be
applied to s_model
"""
##set dispersion only from p_model
for name , value in self.p_model.dispersion.items():
self.dispersion[name] = value
def getProfile(self):
"""
Get SLD profile of p_model if exists
:return: (r, beta) where r is a list of radius of the transition points\
beta is a list of the corresponding SLD values
.. note:: This works only for func_shell num = 2 (exp function).
"""
try:
x, y = self.p_model.getProfile()
except:
x = None
y = None
return x, y
def _set_params(self):
"""
Concatenate the parameters of the two models to create
these model parameters
"""
for name , value in self.p_model.params.items():
if not name in self.params.keys() and name not in self.excluded_params:
self.params[name] = value
for name , value in self.s_model.params.items():
#Remove the radius_effective from the (P*S) model parameters.
if not name in self.params.keys() and name not in self.excluded_params:
self.params[name] = value
# Set "scale and effec_radius to P and S model as initializing
# since run P*S comes from P and S separately.
self._set_backgrounds()
self._set_scale_factor()
self._set_radius_effective()
def _set_details(self):
"""
Concatenate details of the two models to create
this model's details
"""
for name, detail in self.p_model.details.items():
if name not in self.excluded_params:
self.details[name] = detail
        for name, detail in self.s_model.details.items():
            if name not in self.details.keys() and name not in self.excluded_params:
                self.details[name] = detail
def _set_backgrounds(self):
"""
Set component backgrounds to zero
"""
if 'background' in self.p_model.params:
self.p_model.setParam('background',0)
if 'background' in self.s_model.params:
self.s_model.setParam('background',0)
def _set_scale_factor(self):
"""
Set scale=volfraction for P model
"""
value = self.params['volfraction']
if value is not None:
factor = self.p_model.calculate_VR()
if factor is None or factor == NotImplemented or factor == 0.0:
val = value
else:
val = value / factor
self.p_model.setParam('scale', value)
self.s_model.setParam('volfraction', val)
def _set_radius_effective(self):
"""
Set effective radius to S(Q) model
"""
if not 'radius_effective' in self.s_model.params.keys():
return
effective_radius = self.p_model.calculate_ER()
#Reset the effective_radius of s_model just before the run
if effective_radius is not None and effective_radius != NotImplemented:
self.s_model.setParam('radius_effective', effective_radius)
def setParam(self, name, value):
"""
Set the value of a model parameter
:param name: name of the parameter
:param value: value of the parameter
"""
# set param to P*S model
self._setParamHelper( name, value)
## setParam to p model
# set 'scale' in P(Q) equal to volfraction
if name == 'volfraction':
self._set_scale_factor()
elif name in self.p_model.getParamList() and name not in self.excluded_params:
self.p_model.setParam( name, value)
## setParam to s model
        # This is a bit redundant; TODO: find a better way
self._set_radius_effective()
if name in self.s_model.getParamList() and name not in self.excluded_params:
if name != 'volfraction':
self.s_model.setParam( name, value)
#self._setParamHelper( name, value)
def _setParamHelper(self, name, value):
"""
Helper function to setparam
"""
# Look for dispersion parameters
toks = name.split('.')
if len(toks)==2:
for item in self.dispersion.keys():
if item.lower()==toks[0].lower():
for par in self.dispersion[item]:
if par.lower() == toks[1].lower():
self.dispersion[item][par] = value
return
else:
# Look for standard parameter
for item in self.params.keys():
if item.lower() == name.lower():
self.params[item] = value
return
raise ValueError("Model does not contain parameter %s" % name)
def _set_fixed_params(self):
"""
Fill the self.fixed list with the p_model fixed list
"""
for item in self.p_model.fixed:
self.fixed.append(item)
self.fixed.sort()
def run(self, x = 0.0):
"""
Evaluate the model
:param x: input q-value (float or [float, float] as [r, theta])
:return: (scattering function value)
"""
# set effective radius and scaling factor before run
self._set_radius_effective()
self._set_scale_factor()
return self.params['scale_factor'] * self.p_model.run(x) * \
self.s_model.run(x) + self.params['background']
def runXY(self, x = 0.0):
"""
Evaluate the model
:param x: input q-value (float or [float, float] as [qx, qy])
:return: scattering function value
"""
# set effective radius and scaling factor before run
self._set_radius_effective()
self._set_scale_factor()
out = self.params['scale_factor'] * self.p_model.runXY(x) * \
self.s_model.runXY(x) + self.params['background']
return out
## Now (May27,10) directly uses the model eval function
## instead of the for-loop in Base Component.
def evalDistribution(self, x = []):
"""
Evaluate the model in cartesian coordinates
:param x: input q[], or [qx[], qy[]]
:return: scattering function P(q[])
"""
# set effective radius and scaling factor before run
self._set_radius_effective()
self._set_scale_factor()
out = self.params['scale_factor'] * self.p_model.evalDistribution(x) * \
self.s_model.evalDistribution(x) + self.params['background']
return out
def set_dispersion(self, parameter, dispersion):
"""
Set the dispersion object for a model parameter
:param parameter: name of the parameter [string]
:dispersion: dispersion object of type DispersionModel
"""
value = None
try:
if parameter in self.p_model.dispersion.keys():
value = self.p_model.set_dispersion(parameter, dispersion)
self._set_dispersion()
return value
except:
raise
def fill_description(self, p_model, s_model):
"""
Fill the description for P(Q)*S(Q)
"""
description = ""
description += "Note:1) The radius_effective (effective radius) of %s \n"%\
(s_model.name)
description += " is automatically calculated "
description += "from size parameters (radius...).\n"
description += " 2) For non-spherical shape, "
description += "this approximation is valid \n"
description += " only for limited systems. "
description += "Thus, use it at your own risk.\n"
description += "See %s description and %s description \n"% \
( p_model.name, s_model.name )
description += " for details of individual models."
self.description += description
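# Hedged usage sketch (model instances are illustrative; assumes BaseComponent
# models with the expected parameters):
#
#     combined = MultiplicationModel(p_model=sphere, s_model=hard_sphere)
#     combined.setParam('volfraction', 0.2)
#     iq_value = combined.run(0.01)   # scale_factor * P(q) * S(q) + background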
# ---------------------------------------------------------------------------
# Source: /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/fit/MultiplicationModel.py (pypi)
# ---------------------------------------------------------------------------
# TODO: Keep track of data manipulation in the 'process' data structure.
# TODO: This module should be independent of plottables. We should write
# an adapter class for plottables when needed.
from __future__ import print_function
import math
from math import fabs
import copy
import numpy as np
from sas.sascalc.data_util.uncertainty import Uncertainty
class plottable_1D(object):
"""
Data1D is a place holder for 1D plottables.
"""
    x = None
    y = None
    dx = None
    dy = None
    # Slit smearing length; the presence of dxl/dxw should be mutually
    # exclusive with the presence of Qdev (dx)
    dxl = None
    # Slit smearing width
    dxw = None
# SESANS specific params (wavelengths for spin echo length calculation)
lam = None
dlam = None
# Units
_xaxis = ''
_xunit = ''
_yaxis = ''
_yunit = ''
def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None,
lam=None, dlam=None):
self.x = np.asarray(x)
self.y = np.asarray(y)
if dx is not None:
self.dx = np.asarray(dx)
if dy is not None:
self.dy = np.asarray(dy)
if dxl is not None:
self.dxl = np.asarray(dxl)
if dxw is not None:
self.dxw = np.asarray(dxw)
if lam is not None:
self.lam = np.asarray(lam)
if dlam is not None:
self.dlam = np.asarray(dlam)
def xaxis(self, label, unit):
"""
set the x axis label and unit
"""
self._xaxis = label
self._xunit = unit
def yaxis(self, label, unit):
"""
set the y axis label and unit
"""
self._yaxis = label
self._yunit = unit
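# Hedged usage sketch (synthetic Lorentzian curve; not part of the original
# module):
#
#     x = np.linspace(0.001, 0.5, 100)
#     curve = plottable_1D(x=x, y=1.0 / (1.0 + (50.0 * x) ** 2))
#     curve.xaxis('\\rm{Q}', 'A^{-1}')
#     curve.yaxis('\\rm{Intensity}', 'cm^{-1}')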
class plottable_2D(object):
"""
Data2D is a place holder for 2D plottables.
"""
xmin = None
xmax = None
ymin = None
ymax = None
data = None
qx_data = None
qy_data = None
q_data = None
err_data = None
dqx_data = None
dqy_data = None
mask = None
x_bins = None
y_bins = None
# Units
_xaxis = ''
_xunit = ''
_yaxis = ''
_yunit = ''
_zaxis = ''
_zunit = ''
def __init__(self, data=None, err_data=None, qx_data=None,
qy_data=None, q_data=None, mask=None,
dqx_data=None, dqy_data=None, xmin=None, xmax=None,
ymin=None, ymax=None, zmin=None, zmax=None,
x_bins=None, y_bins=None):
self.data = np.asarray(data)
self.qx_data = np.asarray(qx_data)
self.qy_data = np.asarray(qy_data)
self.q_data = np.asarray(q_data)
if mask is not None:
self.mask = np.asarray(mask)
else:
self.mask = np.ones(self.data.shape, dtype=bool)
if err_data is not None:
self.err_data = np.asarray(err_data)
if dqx_data is not None:
self.dqx_data = np.asarray(dqx_data)
if dqy_data is not None:
self.dqy_data = np.asarray(dqy_data)
# plot limits
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.zmin = zmin
self.zmax = zmax
        self.x_bins = x_bins if x_bins else []
        self.y_bins = y_bins if y_bins else []
def xaxis(self, label, unit):
"""
set the x axis label and unit
"""
self._xaxis = label
self._xunit = unit
def yaxis(self, label, unit):
"""
set the y axis label and unit
"""
self._yaxis = label
self._yunit = unit
def zaxis(self, label, unit):
"""
set the z axis label and unit
"""
self._zaxis = label
self._zunit = unit
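# --- Editor's example (not part of the original module) ---------------------
# plottable_2D keeps the detector image in `data` and one q-value per pixel
# in the flattened qx_data/qy_data arrays; the image shape is
# (len(y_bins), len(x_bins)).
def _example_plottable_2d():
    import numpy as np
    qx = np.linspace(-0.1, 0.1, 8)
    qy = np.linspace(-0.1, 0.1, 6)
    qx_grid, qy_grid = np.meshgrid(qx, qy)            # shape (6, 8)
    intensity = np.exp(-(qx_grid**2 + qy_grid**2) / 1e-3)
    data = plottable_2D(data=intensity,
                        qx_data=qx_grid.flatten(),
                        qy_data=qy_grid.flatten(),
                        q_data=np.sqrt(qx_grid**2 + qy_grid**2).flatten(),
                        x_bins=list(qx), y_bins=list(qy))
    data.zaxis("Intensity", "1/cm")
    return data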
class Vector(object):
"""
Vector class to hold multi-dimensional objects
"""
# x component
x = None
# y component
y = None
# z component
z = None
def __init__(self, x=None, y=None, z=None):
"""
        Initialization. Components that are not
        set are None by default.
:param x: x component
:param y: y component
:param z: z component
"""
self.x = x
self.y = y
self.z = z
def __str__(self):
msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
return msg
class Detector(object):
"""
Class to hold detector information
"""
# Name of the instrument [string]
name = None
# Sample to detector distance [float] [mm]
distance = None
distance_unit = 'mm'
    # Offset of this detector position in X, Y,
    # (and Z if necessary) [Vector] [m]
    offset = None
    offset_unit = 'm'
# Orientation (rotation) of this detector in roll,
# pitch, and yaw [Vector] [degrees]
orientation = None
orientation_unit = 'degree'
# Center of the beam on the detector in X and Y
# (and Z if necessary) [Vector] [mm]
beam_center = None
beam_center_unit = 'mm'
# Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
pixel_size = None
pixel_size_unit = 'mm'
# Slit length of the instrument for this detector.[float] [mm]
slit_length = None
slit_length_unit = 'mm'
def __init__(self):
"""
        Initialize class attributes that are objects...
"""
self.offset = Vector()
self.orientation = Vector()
self.beam_center = Vector()
self.pixel_size = Vector()
def __str__(self):
_str = "Detector:\n"
_str += " Name: %s\n" % self.name
_str += " Distance: %s [%s]\n" % \
(str(self.distance), str(self.distance_unit))
_str += " Offset: %s [%s]\n" % \
(str(self.offset), str(self.offset_unit))
_str += " Orientation: %s [%s]\n" % \
(str(self.orientation), str(self.orientation_unit))
_str += " Beam center: %s [%s]\n" % \
(str(self.beam_center), str(self.beam_center_unit))
_str += " Pixel size: %s [%s]\n" % \
(str(self.pixel_size), str(self.pixel_size_unit))
_str += " Slit length: %s [%s]\n" % \
(str(self.slit_length), str(self.slit_length_unit))
return _str
class Aperture(object):
# Name
name = None
# Type
type = None
# Size name
size_name = None
# Aperture size [Vector]
size = None
size_unit = 'mm'
# Aperture distance [float]
distance = None
distance_unit = 'mm'
def __init__(self):
self.size = Vector()
class Collimation(object):
"""
Class to hold collimation information
"""
# Name
name = None
# Length [float] [mm]
length = None
length_unit = 'mm'
# Aperture
aperture = None
def __init__(self):
self.aperture = []
def __str__(self):
_str = "Collimation:\n"
_str += " Length: %s [%s]\n" % \
(str(self.length), str(self.length_unit))
for item in self.aperture:
_str += " Aperture size:%s [%s]\n" % \
(str(item.size), str(item.size_unit))
_str += " Aperture_dist:%s [%s]\n" % \
(str(item.distance), str(item.distance_unit))
return _str
class Source(object):
"""
Class to hold source information
"""
# Name
name = None
# Generic radiation type (Type and probe give more specific info) [string]
radiation = None
# Type and probe are only written to by the NXcanSAS reader
    # Specific radiation type (Synchrotron X-ray, Reactor neutron, etc) [string]
type = None
# Radiation probe (generic probe such as neutron, x-ray, muon, etc) [string]
probe = None
# Beam size name
beam_size_name = None
# Beam size [Vector] [mm]
beam_size = None
beam_size_unit = 'mm'
# Beam shape [string]
beam_shape = None
# Wavelength [float] [Angstrom]
wavelength = None
wavelength_unit = 'A'
    # Minimum wavelength [float] [nm]
    wavelength_min = None
    wavelength_min_unit = 'nm'
    # Maximum wavelength [float] [nm]
    wavelength_max = None
    wavelength_max_unit = 'nm'
    # Wavelength spread [float] [percent]
    wavelength_spread = None
    wavelength_spread_unit = 'percent'
def __init__(self):
self.beam_size = Vector()
def __str__(self):
_str = "Source:\n"
radiation = self.radiation
if self.radiation is None and self.type and self.probe:
radiation = self.type + " " + self.probe
_str += " Radiation: %s\n" % str(radiation)
_str += " Shape: %s\n" % str(self.beam_shape)
_str += " Wavelength: %s [%s]\n" % \
(str(self.wavelength), str(self.wavelength_unit))
_str += " Waveln_min: %s [%s]\n" % \
(str(self.wavelength_min), str(self.wavelength_min_unit))
_str += " Waveln_max: %s [%s]\n" % \
(str(self.wavelength_max), str(self.wavelength_max_unit))
_str += " Waveln_spread:%s [%s]\n" % \
(str(self.wavelength_spread), str(self.wavelength_spread_unit))
_str += " Beam_size: %s [%s]\n" % \
(str(self.beam_size), str(self.beam_size_unit))
return _str
"""
Definitions of radiation types
"""
NEUTRON = 'neutron'
XRAY = 'x-ray'
MUON = 'muon'
ELECTRON = 'electron'
class Sample(object):
"""
Class to hold the sample description
"""
# Short name for sample
name = ''
# ID
ID = ''
# Thickness [float] [mm]
thickness = None
thickness_unit = 'mm'
# Transmission [float] [fraction]
transmission = None
# Temperature [float] [No Default]
temperature = None
temperature_unit = None
# Position [Vector] [mm]
position = None
position_unit = 'mm'
# Orientation [Vector] [degrees]
orientation = None
orientation_unit = 'degree'
# Details
details = None
# SESANS zacceptance
zacceptance = (0,"")
yacceptance = (0,"")
def __init__(self):
self.position = Vector()
self.orientation = Vector()
self.details = []
def __str__(self):
_str = "Sample:\n"
_str += " ID: %s\n" % str(self.ID)
_str += " Transmission: %s\n" % str(self.transmission)
_str += " Thickness: %s [%s]\n" % \
(str(self.thickness), str(self.thickness_unit))
_str += " Temperature: %s [%s]\n" % \
(str(self.temperature), str(self.temperature_unit))
_str += " Position: %s [%s]\n" % \
(str(self.position), str(self.position_unit))
_str += " Orientation: %s [%s]\n" % \
(str(self.orientation), str(self.orientation_unit))
_str += " Details:\n"
for item in self.details:
_str += " %s\n" % item
return _str
class Process(object):
"""
Class that holds information about the processes
performed on the data.
"""
name = ''
date = ''
description = ''
term = None
notes = None
def __init__(self):
self.term = []
self.notes = []
def is_empty(self):
"""
Return True if the object is empty
"""
return (len(self.name) == 0 and len(self.date) == 0
and len(self.description) == 0 and len(self.term) == 0
and len(self.notes) == 0)
def single_line_desc(self):
"""
Return a single line string representing the process
"""
return "%s %s %s" % (self.name, self.date, self.description)
def __str__(self):
_str = "Process:\n"
_str += " Name: %s\n" % self.name
_str += " Date: %s\n" % self.date
_str += " Description: %s\n" % self.description
for item in self.term:
_str += " Term: %s\n" % item
for item in self.notes:
_str += " Note: %s\n" % item
return _str
class TransmissionSpectrum(object):
"""
Class that holds information about transmission spectrum
for white beams and spallation sources.
"""
name = ''
timestamp = ''
# Wavelength (float) [A]
wavelength = None
wavelength_unit = 'A'
# Transmission (float) [unit less]
transmission = None
transmission_unit = ''
# Transmission Deviation (float) [unit less]
transmission_deviation = None
transmission_deviation_unit = ''
def __init__(self):
self.wavelength = []
self.transmission = []
self.transmission_deviation = []
def __str__(self):
_str = "Transmission Spectrum:\n"
_str += " Name: \t{0}\n".format(self.name)
_str += " Timestamp: \t{0}\n".format(self.timestamp)
_str += " Wavelength unit: \t{0}\n".format(self.wavelength_unit)
_str += " Transmission unit:\t{0}\n".format(self.transmission_unit)
_str += " Trans. Dev. unit: \t{0}\n".format(
self.transmission_deviation_unit)
length_list = [len(self.wavelength), len(self.transmission),
len(self.transmission_deviation)]
_str += " Number of Pts: \t{0}\n".format(max(length_list))
return _str
class DataInfo(object):
"""
Class to hold the data read from a file.
It includes four blocks of data for the
instrument description, the sample description,
the data itself and any other meta data.
"""
# Title
title = ''
# Run number
run = None
# Run name
run_name = None
# File name
filename = ''
# Notes
notes = None
# Processes (Action on the data)
process = None
# Instrument name
instrument = ''
# Detector information
detector = None
# Sample information
sample = None
# Source information
source = None
# Collimation information
collimation = None
    # Transmission Spectrum info
trans_spectrum = None
# Additional meta-data
meta_data = None
# Loading errors
errors = None
# SESANS data check
isSesans = None
def __init__(self):
"""
Initialization
"""
# Title
self.title = ''
# Run number
self.run = []
self.run_name = {}
# File name
self.filename = ''
# Notes
self.notes = []
# Processes (Action on the data)
self.process = []
# Instrument name
self.instrument = ''
# Detector information
self.detector = []
# Sample information
self.sample = Sample()
# Source information
self.source = Source()
# Collimation information
self.collimation = []
# Transmission Spectrum
self.trans_spectrum = []
# Additional meta-data
self.meta_data = {}
# Loading errors
self.errors = []
# SESANS data check
self.isSesans = False
def append_empty_process(self):
"""
"""
self.process.append(Process())
def add_notes(self, message=""):
"""
Add notes to datainfo
"""
self.notes.append(message)
def __str__(self):
"""
Nice printout
"""
_str = f"File: {self.filename}\n"
_str += f"Title: {self.title}\n"
_str += f"Run: {self.run}\n"
_str += f"SESANS: {self.isSesans}\n"
_str += f"Instrument: {self.instrument}\n"
_str += f"{str(self.sample)}\n"
_str += f"{str(self.source)}\n"
for item in self.detector:
_str += f"{str(item)}\n"
for item in self.collimation:
_str += f"{str(item)}\n"
for item in self.process:
_str += f"{str(item)}\n"
for item in self.notes:
_str += f"{str(item)}\n"
for item in self.trans_spectrum:
_str += f"{str(item)}\n"
return _str
# Private method to perform operation. Not implemented for DataInfo,
# but should be implemented for each data class inherited from DataInfo
# that holds actual data (ex.: Data1D)
def _perform_operation(self, other, operation):
"""
Private method to perform operation. Not implemented for DataInfo,
but should be implemented for each data class inherited from DataInfo
that holds actual data (ex.: Data1D)
"""
return NotImplemented
def _perform_union(self, other):
"""
Private method to perform union operation. Not implemented for DataInfo,
but should be implemented for each data class inherited from DataInfo
that holds actual data (ex.: Data1D)
"""
return NotImplemented
def __add__(self, other):
"""
Add two data sets
:param other: data set to add to the current one
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return a + b
return self._perform_operation(other, operation)
def __radd__(self, other):
"""
Add two data sets
:param other: data set to add to the current one
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return b + a
return self._perform_operation(other, operation)
def __sub__(self, other):
"""
Subtract two data sets
:param other: data set to subtract from the current one
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return a - b
return self._perform_operation(other, operation)
def __rsub__(self, other):
"""
Subtract two data sets
:param other: data set to subtract from the current one
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return b - a
return self._perform_operation(other, operation)
def __mul__(self, other):
"""
Multiply two data sets
        :param other: data set to multiply with the current one
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return a * b
return self._perform_operation(other, operation)
def __rmul__(self, other):
"""
Multiply two data sets
        :param other: data set to multiply with the current one
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return b * a
return self._perform_operation(other, operation)
def __truediv__(self, other):
"""
        Divide a data set by another
:param other: data set that the current one is divided by
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return a/b
return self._perform_operation(other, operation)
__div__ = __truediv__
def __rtruediv__(self, other):
"""
        Divide a data set by another
:param other: data set that the current one is divided by
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
def operation(a, b):
return b/a
return self._perform_operation(other, operation)
__rdiv__ = __rtruediv__
def __or__(self, other):
"""
        Form the union of this data set with another
:param other: data set to be unified
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
return self._perform_union(other)
def __ror__(self, other):
"""
        Form the union of this data set with another
:param other: data set to be unified
:return: new data set
:raise ValueError: raised when two data sets are incompatible
"""
return self._perform_union(other)
class Data1D(plottable_1D, DataInfo):
"""
1D data class
"""
def __init__(self, x=None, y=None, dx=None, dy=None,
lam=None, dlam=None, isSesans=False):
DataInfo.__init__(self)
plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
self.isSesans = isSesans
        if self.isSesans:  # the data is SESANS
            self.x_unit = 'A'
            self.y_unit = 'pol'
        else:  # the data is SANS
            self.x_unit = '1/A'
            self.y_unit = '1/cm'
def __str__(self):
"""
Nice printout
"""
_str = "%s\n" % DataInfo.__str__(self)
_str += "Data:\n"
_str += " Type: %s\n" % self.__class__.__name__
_str += " X-axis: %s\t[%s]\n" % (self._xaxis, self._xunit)
_str += " Y-axis: %s\t[%s]\n" % (self._yaxis, self._yunit)
_str += " Length: %g\n" % len(self.x)
return _str
def is_slit_smeared(self):
"""
Check whether the data has slit smearing information
        :return: True if slit smearing info is present, False otherwise
"""
def _check(v):
return ((v.__class__ == list or v.__class__ == np.ndarray)
and len(v) > 0 and min(v) > 0)
return _check(self.dxl) or _check(self.dxw)
def clone_without_data(self, length=0, clone=None):
"""
Clone the current object, without copying the data (which
will be filled out by a subsequent operation).
The data arrays will be initialized to zero.
:param length: length of the data array to be initialized
:param clone: if provided, the data will be copied to clone
"""
from copy import deepcopy
if clone is None or not issubclass(clone.__class__, Data1D):
x = np.zeros(length)
dx = np.zeros(length)
y = np.zeros(length)
dy = np.zeros(length)
lam = np.zeros(length)
dlam = np.zeros(length)
clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)
clone.title = self.title
clone.run = self.run
clone.filename = self.filename
clone.instrument = self.instrument
clone.notes = deepcopy(self.notes)
clone.process = deepcopy(self.process)
clone.detector = deepcopy(self.detector)
clone.sample = deepcopy(self.sample)
clone.source = deepcopy(self.source)
clone.collimation = deepcopy(self.collimation)
clone.trans_spectrum = deepcopy(self.trans_spectrum)
clone.meta_data = deepcopy(self.meta_data)
clone.errors = deepcopy(self.errors)
return clone
def copy_from_datainfo(self, data1d):
"""
        Copy values from a Data1D of type DataLoader.data_info
"""
self.x = copy.deepcopy(data1d.x)
self.y = copy.deepcopy(data1d.y)
self.dy = copy.deepcopy(data1d.dy)
if hasattr(data1d, "dx"):
self.dx = copy.deepcopy(data1d.dx)
if hasattr(data1d, "dxl"):
self.dxl = copy.deepcopy(data1d.dxl)
if hasattr(data1d, "dxw"):
self.dxw = copy.deepcopy(data1d.dxw)
self.xaxis(data1d._xaxis, data1d._xunit)
self.yaxis(data1d._yaxis, data1d._yunit)
self.title = data1d.title
def _validity_check(self, other):
"""
Checks that the data lengths are compatible.
Checks that the x vectors are compatible.
Returns errors vectors equal to original
errors vectors if they were present or vectors
of zeros when none was found.
:param other: other data set for operation
:return: dy for self, dy for other [numpy arrays]
:raise ValueError: when lengths are not compatible
"""
dy_other = None
if isinstance(other, Data1D):
# Check that data lengths are the same
if len(self.x) != len(other.x) or len(self.y) != len(other.y):
msg = "Unable to perform operation: data length are not equal"
raise ValueError(msg)
# Here we could also extrapolate between data points
TOLERANCE = 0.01
for i in range(len(self.x)):
if fabs(self.x[i] - other.x[i]) > self.x[i]*TOLERANCE:
msg = "Incompatible data sets: x-values do not match"
raise ValueError(msg)
# Check that the other data set has errors, otherwise
# create zero vector
dy_other = other.dy
if other.dy is None or (len(other.dy) != len(other.y)):
dy_other = np.zeros(len(other.y))
# Check that we have errors, otherwise create zero vector
dy = self.dy
if self.dy is None or (len(self.dy) != len(self.y)):
dy = np.zeros(len(self.y))
return dy, dy_other
def _perform_operation(self, other, operation):
"""
"""
# First, check the data compatibility
dy, dy_other = self._validity_check(other)
result = self.clone_without_data(len(self.x))
if self.dxw is None:
result.dxw = None
else:
result.dxw = np.zeros(len(self.x))
if self.dxl is None:
result.dxl = None
else:
result.dxl = np.zeros(len(self.x))
for i in range(len(self.x)):
result.x[i] = self.x[i]
if self.dx is not None and len(self.x) == len(self.dx):
result.dx[i] = self.dx[i]
if self.dxw is not None and len(self.x) == len(self.dxw):
result.dxw[i] = self.dxw[i]
if self.dxl is not None and len(self.x) == len(self.dxl):
result.dxl[i] = self.dxl[i]
a = Uncertainty(self.y[i], dy[i]**2)
if isinstance(other, Data1D):
b = Uncertainty(other.y[i], dy_other[i]**2)
                if other.dx is not None:
                    # combine the resolutions in quadrature:
                    # dx -> sqrt((dx_self**2 + dx_other**2) / 2)
                    result.dx[i] *= self.dx[i]
                    result.dx[i] += (other.dx[i]**2)
                    result.dx[i] /= 2
                    result.dx[i] = math.sqrt(result.dx[i])
if result.dxl is not None and other.dxl is not None:
result.dxl[i] *= self.dxl[i]
result.dxl[i] += (other.dxl[i]**2)
result.dxl[i] /= 2
result.dxl[i] = math.sqrt(result.dxl[i])
else:
b = other
output = operation(a, b)
result.y[i] = output.x
result.dy[i] = math.sqrt(math.fabs(output.variance))
return result
def _validity_check_union(self, other):
"""
Checks that the data lengths are compatible.
Checks that the x vectors are compatible.
Returns errors vectors equal to original
errors vectors if they were present or vectors
of zeros when none was found.
:param other: other data set for operation
:return: bool
:raise ValueError: when data types are not compatible
"""
if not isinstance(other, Data1D):
msg = "Unable to perform operation: different types of data set"
raise ValueError(msg)
return True
def _perform_union(self, other):
"""
"""
# First, check the data compatibility
self._validity_check_union(other)
result = self.clone_without_data(len(self.x) + len(other.x))
if self.dy is None or other.dy is None:
result.dy = None
else:
result.dy = np.zeros(len(self.x) + len(other.x))
if self.dx is None or other.dx is None:
result.dx = None
else:
result.dx = np.zeros(len(self.x) + len(other.x))
if self.dxw is None or other.dxw is None:
result.dxw = None
else:
result.dxw = np.zeros(len(self.x) + len(other.x))
if self.dxl is None or other.dxl is None:
result.dxl = None
else:
result.dxl = np.zeros(len(self.x) + len(other.x))
result.x = np.append(self.x, other.x)
# argsorting
ind = np.argsort(result.x)
result.x = result.x[ind]
result.y = np.append(self.y, other.y)
result.y = result.y[ind]
if result.dy is not None:
result.dy = np.append(self.dy, other.dy)
result.dy = result.dy[ind]
if result.dx is not None:
result.dx = np.append(self.dx, other.dx)
result.dx = result.dx[ind]
if result.dxw is not None:
result.dxw = np.append(self.dxw, other.dxw)
result.dxw = result.dxw[ind]
if result.dxl is not None:
result.dxl = np.append(self.dxl, other.dxl)
result.dxl = result.dxl[ind]
return result
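# --- Editor's example (not part of the original module) ---------------------
# Data1D inherits the arithmetic operators from DataInfo: operations combine
# the y-values point by point and propagate dy through Uncertainty, while
# the | operator merges two sets and sorts the result by x.
def _example_data1d_arithmetic():
    import numpy as np
    x = np.linspace(1e-3, 0.5, 20)
    a = Data1D(x=x, y=np.ones_like(x), dy=0.1 * np.ones_like(x))
    b = Data1D(x=x, y=2.0 * np.ones_like(x), dy=0.1 * np.ones_like(x))
    difference = b - a    # DataInfo.__sub__ -> Data1D._perform_operation
    merged = a | b        # DataInfo.__or__  -> Data1D._perform_union
    return difference, merged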
class Data2D(plottable_2D, DataInfo):
"""
2D data class
"""
# Units for Q-values
Q_unit = '1/A'
# Units for I(Q) values
I_unit = '1/cm'
# No 2D SESANS data as of yet. Always set it to False
isSesans = False
def __init__(self, data=None, err_data=None, qx_data=None,
qy_data=None, q_data=None, mask=None,
dqx_data=None, dqy_data=None,
xmin=None, xmax=None, ymin=None, ymax=None,
zmin=None, zmax=None):
DataInfo.__init__(self)
plottable_2D.__init__(self, data=data, err_data=err_data,
qx_data=qx_data, qy_data=qy_data,
dqx_data=dqx_data, dqy_data=dqy_data,
q_data=q_data, mask=mask, xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax, zmin=zmin, zmax=zmax)
if len(self.detector) > 0:
raise RuntimeError("Data2D: Detector bank already filled at init")
def __str__(self):
_str = "%s\n" % DataInfo.__str__(self)
_str += "Data:\n"
_str += " Type: %s\n" % self.__class__.__name__
_str += " X-axis: %s\t[%s]\n" % (self._xaxis, self._xunit)
_str += " Y-axis: %s\t[%s]\n" % (self._yaxis, self._yunit)
_str += " Z-axis: %s\t[%s]\n" % (self._zaxis, self._zunit)
_str += " Length: %g \n" % (len(self.data))
_str += " Shape: (%d, %d)\n" % (len(self.y_bins),
len(self.x_bins))
return _str
def clone_without_data(self, length=0, clone=None):
"""
Clone the current object, without copying the data (which
will be filled out by a subsequent operation).
The data arrays will be initialized to zero.
:param length: length of the data array to be initialized
:param clone: if provided, the data will be copied to clone
"""
from copy import deepcopy
if clone is None or not issubclass(clone.__class__, Data2D):
data = np.zeros(length)
err_data = np.zeros(length)
qx_data = np.zeros(length)
qy_data = np.zeros(length)
q_data = np.zeros(length)
mask = np.zeros(length)
clone = Data2D(data=data, err_data=err_data,
qx_data=qx_data, qy_data=qy_data,
q_data=q_data, mask=mask)
clone._xaxis = self._xaxis
clone._yaxis = self._yaxis
clone._zaxis = self._zaxis
clone._xunit = self._xunit
clone._yunit = self._yunit
clone._zunit = self._zunit
clone.x_bins = self.x_bins
clone.y_bins = self.y_bins
clone.title = self.title
clone.run = self.run
clone.filename = self.filename
clone.instrument = self.instrument
clone.notes = deepcopy(self.notes)
clone.process = deepcopy(self.process)
clone.detector = deepcopy(self.detector)
clone.sample = deepcopy(self.sample)
clone.source = deepcopy(self.source)
clone.collimation = deepcopy(self.collimation)
clone.trans_spectrum = deepcopy(self.trans_spectrum)
clone.meta_data = deepcopy(self.meta_data)
clone.errors = deepcopy(self.errors)
return clone
def copy_from_datainfo(self, data2d):
"""
        Copy values from a Data2D of type DataLoader.data_info
"""
self.data = copy.deepcopy(data2d.data)
self.qx_data = copy.deepcopy(data2d.qx_data)
self.qy_data = copy.deepcopy(data2d.qy_data)
self.q_data = copy.deepcopy(data2d.q_data)
self.mask = copy.deepcopy(data2d.mask)
self.err_data = copy.deepcopy(data2d.err_data)
self.x_bins = copy.deepcopy(data2d.x_bins)
self.y_bins = copy.deepcopy(data2d.y_bins)
if data2d.dqx_data is not None:
self.dqx_data = copy.deepcopy(data2d.dqx_data)
if data2d.dqy_data is not None:
self.dqy_data = copy.deepcopy(data2d.dqy_data)
self.xmin = data2d.xmin
self.xmax = data2d.xmax
self.ymin = data2d.ymin
self.ymax = data2d.ymax
if hasattr(data2d, "zmin"):
self.zmin = data2d.zmin
if hasattr(data2d, "zmax"):
self.zmax = data2d.zmax
self.xaxis(data2d._xaxis, data2d._xunit)
self.yaxis(data2d._yaxis, data2d._yunit)
self.title = data2d.title
def _validity_check(self, other):
"""
Checks that the data lengths are compatible.
Checks that the x vectors are compatible.
Returns errors vectors equal to original
errors vectors if they were present or vectors
of zeros when none was found.
:param other: other data set for operation
:return: dy for self, dy for other [numpy arrays]
:raise ValueError: when lengths are not compatible
"""
err_other = None
TOLERANCE = 0.01
msg_base = "Incompatible data sets: q-values do not match: "
if isinstance(other, Data2D):
# Check that data lengths are the same
if (len(self.data) != len(other.data)
or len(self.qx_data) != len(other.qx_data)
or len(self.qy_data) != len(other.qy_data)):
msg = "Unable to perform operation: data length are not equal"
raise ValueError(msg)
for ind in range(len(self.data)):
if (fabs(self.qx_data[ind] - other.qx_data[ind])
> fabs(self.qx_data[ind])*TOLERANCE):
msg = f"{msg_base}{self.qx_data[ind]} {other.qx_data[ind]}"
raise ValueError(msg)
if (fabs(self.qy_data[ind] - other.qy_data[ind])
> fabs(self.qy_data[ind])*TOLERANCE):
msg = f"{msg_base}{self.qy_data[ind]} {other.qy_data[ind]}"
raise ValueError(msg)
            # Check that the other data set has errors, otherwise
            # create zero vector
err_other = other.err_data
if (other.err_data is None
or (len(other.err_data) != len(other.data))):
err_other = np.zeros(len(other.data))
# Check that we have errors, otherwise create zero vector
err = self.err_data
if self.err_data is None or (len(self.err_data) != len(self.data)):
            err = np.zeros(len(self.data))
return err, err_other
def _perform_operation(self, other, operation):
"""
Perform 2D operations between data sets
:param other: other data set
:param operation: function defining the operation
"""
# First, check the data compatibility
dy, dy_other = self._validity_check(other)
result = self.clone_without_data(np.size(self.data))
if self.dqx_data is None or self.dqy_data is None:
result.dqx_data = None
result.dqy_data = None
else:
result.dqx_data = np.zeros(len(self.data))
result.dqy_data = np.zeros(len(self.data))
for i in range(np.size(self.data)):
result.data[i] = self.data[i]
if (self.err_data is not None
and np.size(self.data) == np.size(self.err_data)):
result.err_data[i] = self.err_data[i]
if self.dqx_data is not None:
result.dqx_data[i] = self.dqx_data[i]
if self.dqy_data is not None:
result.dqy_data[i] = self.dqy_data[i]
result.qx_data[i] = self.qx_data[i]
result.qy_data[i] = self.qy_data[i]
result.q_data[i] = self.q_data[i]
result.mask[i] = self.mask[i]
a = Uncertainty(self.data[i], dy[i]**2)
if isinstance(other, Data2D):
b = Uncertainty(other.data[i], dy_other[i]**2)
if other.dqx_data is not None and result.dqx_data is not None:
result.dqx_data[i] *= self.dqx_data[i]
result.dqx_data[i] += (other.dqx_data[i]**2)
result.dqx_data[i] /= 2
result.dqx_data[i] = math.sqrt(result.dqx_data[i])
if other.dqy_data is not None and result.dqy_data is not None:
result.dqy_data[i] *= self.dqy_data[i]
result.dqy_data[i] += (other.dqy_data[i]**2)
result.dqy_data[i] /= 2
result.dqy_data[i] = math.sqrt(result.dqy_data[i])
else:
b = other
output = operation(a, b)
result.data[i] = output.x
result.err_data[i] = math.sqrt(math.fabs(output.variance))
return result
    def _validity_check_union(self, other):
        """
        Checks that the two data sets are of the same type.
        :param other: other data set for operation
        :return: True if compatible
        :raise ValueError: when data types are not compatible
        """
if not isinstance(other, Data2D):
msg = "Unable to perform operation: different types of data set"
raise ValueError(msg)
return True
def _perform_union(self, other):
"""
Perform 2D operations between data sets
:param other: other data set
:param operation: function defining the operation
"""
# First, check the data compatibility
self._validity_check_union(other)
result = self.clone_without_data(np.size(self.data)
+ np.size(other.data))
result.xmin = self.xmin
result.xmax = self.xmax
result.ymin = self.ymin
result.ymax = self.ymax
if (self.dqx_data is None or self.dqy_data is None
or other.dqx_data is None or other.dqy_data is None):
result.dqx_data = None
result.dqy_data = None
else:
result.dqx_data = np.zeros(len(self.data) + np.size(other.data))
result.dqy_data = np.zeros(len(self.data) + np.size(other.data))
result.data = np.append(self.data, other.data)
result.qx_data = np.append(self.qx_data, other.qx_data)
result.qy_data = np.append(self.qy_data, other.qy_data)
result.q_data = np.append(self.q_data, other.q_data)
result.mask = np.append(self.mask, other.mask)
if result.err_data is not None:
result.err_data = np.append(self.err_data, other.err_data)
if self.dqx_data is not None:
result.dqx_data = np.append(self.dqx_data, other.dqx_data)
if self.dqy_data is not None:
result.dqy_data = np.append(self.dqy_data, other.dqy_data)
return result
def combine_data_info_with_plottable(data, datainfo):
"""
    A function that combines a DataInfo object with a plottable_1D or
    plottable_2D data object.
:param data: A plottable_1D or plottable_2D data object
:param datainfo: A DataInfo object to be combined with the plottable
:return: A fully specified Data1D or Data2D object
"""
if isinstance(data, plottable_1D):
final_dataset = Data1D(data.x, data.y, isSesans=datainfo.isSesans)
final_dataset.dx = data.dx
final_dataset.dy = data.dy
final_dataset.dxl = data.dxl
final_dataset.dxw = data.dxw
final_dataset.x_unit = data._xunit
final_dataset.y_unit = data._yunit
final_dataset.xaxis(data._xaxis, data._xunit)
final_dataset.yaxis(data._yaxis, data._yunit)
elif isinstance(data, plottable_2D):
final_dataset = Data2D(data.data, data.err_data, data.qx_data,
data.qy_data, data.q_data, data.mask,
data.dqx_data, data.dqy_data)
final_dataset.xaxis(data._xaxis, data._xunit)
final_dataset.yaxis(data._yaxis, data._yunit)
final_dataset.zaxis(data._zaxis, data._zunit)
final_dataset.x_bins = data.x_bins
final_dataset.y_bins = data.y_bins
    else:
        return_string = ("Should never happen: combine_data_info_with_"
                         "plottable input is not a plottable_1D or "
                         "plottable_2D data object")
        return return_string
if hasattr(data, "xmax"):
final_dataset.xmax = data.xmax
if hasattr(data, "ymax"):
final_dataset.ymax = data.ymax
if hasattr(data, "xmin"):
final_dataset.xmin = data.xmin
if hasattr(data, "ymin"):
final_dataset.ymin = data.ymin
final_dataset.isSesans = datainfo.isSesans
final_dataset.title = datainfo.title
final_dataset.run = datainfo.run
final_dataset.run_name = datainfo.run_name
final_dataset.filename = datainfo.filename
final_dataset.notes = datainfo.notes
final_dataset.process = datainfo.process
final_dataset.instrument = datainfo.instrument
final_dataset.detector = datainfo.detector
final_dataset.sample = datainfo.sample
final_dataset.source = datainfo.source
final_dataset.collimation = datainfo.collimation
final_dataset.trans_spectrum = datainfo.trans_spectrum
final_dataset.meta_data = datainfo.meta_data
final_dataset.errors = datainfo.errors
return final_dataset
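# --- Editor's example (not part of the original module) ---------------------
# Typical use of combine_data_info_with_plottable(): a reader builds the raw
# plottable and the metadata separately, then merges them into a Data1D.
def _example_combine():
    import numpy as np
    raw = plottable_1D(np.linspace(1e-3, 0.5, 10), np.ones(10))
    info = DataInfo()
    info.title = "example data set"
    return combine_data_info_with_plottable(raw, info)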
|
/sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/dataloader/data_info.py
| 0.455683 | 0.487551 |
data_info.py
|
pypi
|
import logging
from sas.sascalc.dataloader.readers import abs_reader
from sas.sascalc.dataloader.readers import anton_paar_saxs_reader
from sas.sascalc.dataloader.readers import ascii_reader
from sas.sascalc.dataloader.readers import cansas_reader
from sas.sascalc.dataloader.readers import cansas_reader_HDF5
from sas.sascalc.dataloader.readers import csv_reader
from sas.sascalc.dataloader.readers import danse_reader
from sas.sascalc.dataloader.readers import red2d_reader
from sas.sascalc.dataloader.readers import sesans_reader
logger = logging.getLogger(__name__)
FILE_ASSOCIATIONS = {
".xml": cansas_reader,
".ses": sesans_reader,
".sesans": sesans_reader,
".h5": cansas_reader_HDF5,
".hdf": cansas_reader_HDF5,
".hdf5": cansas_reader_HDF5,
".nxs": cansas_reader_HDF5,
".txt": ascii_reader,
".csv": csv_reader,
".dat": red2d_reader,
".abs": abs_reader,
".cor": abs_reader,
".sans": danse_reader,
".pdh": anton_paar_saxs_reader
}
GENERIC_READERS = [
ascii_reader,
cansas_reader,
cansas_reader_HDF5
]
def read_associations(loader, settings=None):
# type: (Registry, {str: FileReader}) -> None
"""
Use the specified settings dictionary to associate readers to file extension.
:param loader: Loader object
:param settings: A dictionary in the format {str(file extension): data_loader_class} that is used to associate a
file extension to a data loader class
"""
# Default to a known list of extensions
if not settings:
settings = FILE_ASSOCIATIONS
# For each FileType entry, get the associated reader and extension
for ext, reader in settings.items():
# Associate the extension with a particular reader
if not loader.associate_file_type(ext.lower(), reader):
msg = f"read_associations: skipping association for {ext.lower()}"
logger.warning(msg)
def get_generic_readers(settings=None, use_generic_readers=True):
# type: ([FileReader], bool) -> []
"""
Returns a list of default readers that the data loader system will use in an attempt to load a data file.
A list of loaders can be passed as an argument which will be appended to (if use_generic is True) or override the
list of generic readers.
:param settings: A list of modules to use as default readers. If None is passed, a default list will be used.
:param use_generic_readers: Boolean to say if the generic readers should be added to the list of readers returned.
:return: Final list of default loader modules the dataloader system will try if necessary
"""
default_readers = GENERIC_READERS.copy() if use_generic_readers else []
if isinstance(settings, (list, set, tuple)):
default_readers.extend(settings)
return default_readers
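# --- Editor's example (not part of the original module) ---------------------
# Extending the default reader list; any module exposing the FileReader
# interface can be appended (the existing red2d_reader serves as a stand-in
# for a project-specific reader here).
def _example_extra_readers():
    readers = get_generic_readers(settings=[red2d_reader])
    # -> [ascii_reader, cansas_reader, cansas_reader_HDF5, red2d_reader]
    return readers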
|
/sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/dataloader/readers/associations.py
| 0.4436 | 0.214877 |
associations.py
|
pypi
|
import math
import os
import logging
import numpy as np
from ..data_info import plottable_2D, DataInfo, Detector
from ..manipulations import reader2D_converter
from ..file_reader_base_class import FileReader
from ..loader_exceptions import FileContentsException, DataReaderException
logger = logging.getLogger(__name__)
# Look for unit converter
has_converter = True
try:
    from sas.sascalc.data_util.nxsunit import Converter
except ImportError:
    has_converter = False
class Reader(FileReader):
"""
Example data manipulation
"""
## File type
type_name = "DANSE"
## Wildcards
type = ["DANSE files (*.sans)|*.sans"]
## Extension
ext = ['.sans', '.SANS']
def get_file_contents(self):
self.current_datainfo = DataInfo()
self.current_dataset = plottable_2D()
self.output = []
loaded_correctly = True
error_message = ""
# defaults
# wavelength in Angstrom
wavelength = 10.0
# Distance in meter
distance = 11.0
# Pixel number of center in x
center_x = 65
# Pixel number of center in y
center_y = 65
# Pixel size [mm]
pixel = 5.0
# Size in x, in pixels
size_x = 128
# Size in y, in pixels
size_y = 128
# Format version
fversion = 1.0
self.current_datainfo.filename = os.path.basename(self.f_open.name)
detector = Detector()
self.current_datainfo.detector.append(detector)
self.current_dataset.data = np.zeros([size_x, size_y])
self.current_dataset.err_data = np.zeros([size_x, size_y])
read_on = True
data_start_line = 1
while read_on:
line = self.nextline()
data_start_line += 1
if line.find("DATA:") >= 0:
read_on = False
break
toks = line.split(':')
try:
if toks[0] == "FORMATVERSION":
fversion = float(toks[1])
elif toks[0] == "WAVELENGTH":
wavelength = float(toks[1])
elif toks[0] == "DISTANCE":
distance = float(toks[1])
elif toks[0] == "CENTER_X":
center_x = float(toks[1])
elif toks[0] == "CENTER_Y":
center_y = float(toks[1])
elif toks[0] == "PIXELSIZE":
pixel = float(toks[1])
elif toks[0] == "SIZE_X":
size_x = int(toks[1])
elif toks[0] == "SIZE_Y":
size_y = int(toks[1])
except ValueError as e:
error_message += "Unable to parse {}. Default value used.\n".format(toks[0])
loaded_correctly = False
# Read the data
data = []
error = []
        if fversion < 1.0:
msg = "danse_reader can't read this file {}".format(self.f_open.name)
raise FileContentsException(msg)
for line_num, data_str in enumerate(self.nextlines()):
toks = data_str.split()
try:
val = float(toks[0])
err = float(toks[1])
data.append(val)
error.append(err)
except ValueError as exc:
msg = "Unable to parse line {}: {}".format(line_num + data_start_line, data_str.strip())
raise FileContentsException(msg)
num_pts = size_x * size_y
if len(data) < num_pts:
msg = "Not enough data points provided. Expected {} but got {}".format(
size_x * size_y, len(data))
raise FileContentsException(msg)
elif len(data) > num_pts:
error_message += ("Too many data points provided. Expected {0} but"
" got {1}. Only the first {0} will be used.\n").format(num_pts, len(data))
loaded_correctly = False
data = data[:num_pts]
error = error[:num_pts]
        # Qx and Qy vectors
        i_x = np.arange(size_x)
        theta = (i_x - center_x + 1) * pixel / distance / 100.0
x_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0)
xmin = x_vals.min()
xmax = x_vals.max()
i_y = np.arange(size_y)
theta = (i_y - center_y + 1) * pixel / distance / 100.0
y_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0)
ymin = y_vals.min()
ymax = y_vals.max()
self.current_dataset.data = np.array(data, dtype=np.float64).reshape((size_y, size_x))
if fversion > 1.0:
self.current_dataset.err_data = np.array(error, dtype=np.float64).reshape((size_y, size_x))
# Store all data
# Store wavelength
if has_converter and self.current_datainfo.source.wavelength_unit != 'A':
conv = Converter('A')
wavelength = conv(wavelength,
units=self.current_datainfo.source.wavelength_unit)
self.current_datainfo.source.wavelength = wavelength
# Store distance
if has_converter and detector.distance_unit != 'm':
conv = Converter('m')
distance = conv(distance, units=detector.distance_unit)
detector.distance = distance
# Store pixel size
if has_converter and detector.pixel_size_unit != 'mm':
conv = Converter('mm')
pixel = conv(pixel, units=detector.pixel_size_unit)
detector.pixel_size.x = pixel
detector.pixel_size.y = pixel
# Store beam center in distance units
detector.beam_center.x = center_x * pixel
detector.beam_center.y = center_y * pixel
self.current_dataset = self.set_default_2d_units(self.current_dataset)
self.current_dataset.x_bins = x_vals
self.current_dataset.y_bins = y_vals
# Reshape data
x_vals = np.tile(x_vals, (size_y, 1)).flatten()
y_vals = np.tile(y_vals, (size_x, 1)).T.flatten()
if (np.all(self.current_dataset.err_data == None)
or np.any(self.current_dataset.err_data <= 0)):
new_err_data = np.sqrt(np.abs(self.current_dataset.data))
else:
new_err_data = self.current_dataset.err_data.flatten()
self.current_dataset.err_data = new_err_data
self.current_dataset.qx_data = x_vals
self.current_dataset.qy_data = y_vals
self.current_dataset.q_data = np.sqrt(x_vals**2 + y_vals**2)
self.current_dataset.mask = np.ones(len(x_vals), dtype=bool)
# Store loading process information
self.current_datainfo.meta_data['loader'] = self.type_name
self.send_to_output()
if not loaded_correctly:
raise DataReaderException(error_message)
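# --- Editor's example (not part of the original module) ---------------------
# The Q computation used in get_file_contents() above, in compact form:
# pixel offsets from the beam center become scattering angles (the /100.0
# divisor reproduces the reader's pixel/distance unit convention), and
# Q = 4*pi/lambda * sin(theta/2).
def _example_danse_q_values():
    import numpy as np
    wavelength, distance, pixel = 10.0, 11.0, 5.0    # defaults used above
    center_x, size_x = 65, 128
    theta = (np.arange(size_x) - center_x + 1) * pixel / distance / 100.0
    return 4.0 * np.pi / wavelength * np.sin(theta / 2.0)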
|
/sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/dataloader/readers/danse_reader.py
| 0.405331 | 0.308412 |
danse_reader.py
|
pypi
|
import os
import numpy as np
from ..file_reader_base_class import FileReader
from ..data_info import plottable_1D, DataInfo
from ..loader_exceptions import FileContentsException
# Check whether we have a converter available
has_converter = True
try:
from sas.sascalc.data_util.nxsunit import Converter
except ImportError:
has_converter = False
_ZERO = 1e-16
class Reader(FileReader):
"""
Class to load sesans files (6 columns).
"""
# File type
type_name = "SESANS"
## Wildcards
type = ["SESANS files (*.ses)|*.ses",
"SESANS files (*..sesans)|*.sesans"]
# List of allowed extensions
ext = ['.ses', '.SES', '.sesans', '.SESANS']
# Flag to bypass extension check
allow_all = True
def get_file_contents(self):
self.current_datainfo = DataInfo()
self.current_dataset = plottable_1D(np.array([]), np.array([]))
self.current_datainfo.isSesans = True
self.output = []
line = self.nextline()
params = {}
while line and not line.startswith("BEGIN_DATA"):
terms = line.split()
if len(terms) >= 2:
params[terms[0]] = " ".join(terms[1:])
line = self.nextline()
self.params = params
if "FileFormatVersion" not in self.params:
raise FileContentsException("SES file missing FileFormatVersion")
if float(self.params["FileFormatVersion"]) >= 2.0:
raise FileContentsException("SASView only supports SES version 1")
if "SpinEchoLength_unit" not in self.params:
raise FileContentsException("SpinEchoLength has no units")
if "Wavelength_unit" not in self.params:
raise FileContentsException("Wavelength has no units")
if params["SpinEchoLength_unit"] != params["Wavelength_unit"]:
raise FileContentsException(
"The spin echo data has rudely used "
"different units for the spin echo length "
"and the wavelength. While sasview could "
"handle this instance, it is a violation "
"of the file format and will not be "
"handled by other software.")
headers = self.nextline().split()
self._insist_header(headers, "SpinEchoLength")
self._insist_header(headers, "Depolarisation")
self._insist_header(headers, "Depolarisation_error")
self._insist_header(headers, "Wavelength")
data = np.loadtxt(self.f_open)
if data.shape[1] != len(headers):
raise FileContentsException(
"File has {} headers, but {} columns".format(
len(headers),
data.shape[1]))
if not data.size:
raise FileContentsException("{} is empty".format(self.filepath))
x = data[:, headers.index("SpinEchoLength")]
if "SpinEchoLength_error" in headers:
dx = data[:, headers.index("SpinEchoLength_error")]
else:
dx = x * 0.05
lam = data[:, headers.index("Wavelength")]
if "Wavelength_error" in headers:
dlam = data[:, headers.index("Wavelength_error")]
else:
dlam = lam * 0.05
y = data[:, headers.index("Depolarisation")]
dy = data[:, headers.index("Depolarisation_error")]
lam_unit = self._unit_fetch("Wavelength")
x, x_unit = self._unit_conversion(x, "A",
self._unit_fetch(
"SpinEchoLength"))
dx, dx_unit = self._unit_conversion(
dx, lam_unit,
self._unit_fetch("SpinEchoLength"))
dlam, dlam_unit = self._unit_conversion(
dlam, lam_unit,
self._unit_fetch("Wavelength"))
y_unit = self._unit_fetch("Depolarisation")
self.current_dataset.x = x
self.current_dataset.y = y
self.current_dataset.lam = lam
self.current_dataset.dy = dy
self.current_dataset.dx = dx
self.current_dataset.dlam = dlam
self.current_datainfo.isSesans = True
self.current_datainfo._yunit = y_unit
self.current_datainfo._xunit = x_unit
self.current_datainfo.source.wavelength_unit = lam_unit
self.current_datainfo.source.wavelength = lam
self.current_datainfo.filename = os.path.basename(self.f_open.name)
self.current_dataset.xaxis(r"\rm{z}", x_unit)
# Adjust label to ln P/(lam^2 t), remove lam column refs
self.current_dataset.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit)
# Store loading process information
self.current_datainfo.meta_data['loader'] = self.type_name
self.current_datainfo.sample.name = params["Sample"]
self.current_datainfo.sample.ID = params["DataFileTitle"]
self.current_datainfo.sample.thickness = self._unit_conversion(
float(params["Thickness"]), "cm",
self._unit_fetch("Thickness"))[0]
self.current_datainfo.sample.zacceptance = (
float(params["Theta_zmax"]),
self._unit_fetch("Theta_zmax"))
self.current_datainfo.sample.yacceptance = (
float(params["Theta_ymax"]),
self._unit_fetch("Theta_ymax"))
self.send_to_output()
@staticmethod
def _insist_header(headers, name):
if name not in headers:
raise FileContentsException(
"Missing {} column in spin echo data".format(name))
@staticmethod
def _unit_conversion(value, value_unit, default_unit):
"""
Performs unit conversion on a measurement.
:param value: The magnitude of the measurement
:param value_unit: a string containing the final desired unit
:param default_unit: string with the units of the original measurement
:return: The magnitude of the measurement in the new units
"""
# (float, string, string) -> float
if has_converter and value_unit != default_unit:
data_conv_q = Converter(default_unit)
value = data_conv_q(value, units=value_unit)
new_unit = default_unit
else:
new_unit = value_unit
return value, new_unit
def _unit_fetch(self, unit):
return self.params[unit+"_unit"]
|
/sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/dataloader/readers/sesans_reader.py
| 0.613237 | 0.248022 |
sesans_reader.py
|
pypi
|
import logging
import h5py
import numpy as np
import re
import os
import traceback
from sas.sascalc.dataloader.data_info import plottable_1D, plottable_2D,\
Data1D, Data2D, DataInfo, Process, Aperture, Collimation, \
TransmissionSpectrum, Detector
from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DefaultReaderException
from sas.sascalc.dataloader.file_reader_base_class import FileReader, decode
try:
basestring
except NameError: # CRUFT: python 2 support
basestring = str
logger = logging.getLogger(__name__)
def h5attr(node, key, default=None):
value = node.attrs.get(key, default)
#print("h5attr", node, key, value, type(value))
if isinstance(value, np.ndarray) and value.dtype.char == 'S':
return [decode(el) for el in value]
elif isinstance(value, list):
return [decode(el) for el in value]
else:
return decode(value)
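# --- Editor's example (not part of the original module) ---------------------
# h5attr() normalises HDF5 attribute values: byte strings, and arrays of
# them, are decoded to str before use. A sketch of walking the top level of
# an NXcanSAS file with it ("path" is any HDF5 file path):
def _example_h5attr(path):
    import h5py
    with h5py.File(path, 'r') as handle:
        for name, group in handle.items():
            print(name, h5attr(group, u'canSAS_class', default='unknown'))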
class Reader(FileReader):
"""
A class for reading in NXcanSAS data files. The current implementation has
been tested to load data generated by multiple facilities, all of which are
known to produce NXcanSAS standards compliant data. Any number of data sets
may be present within the file and any dimensionality of data may be used.
Currently 1D and 2D SAS data sets are supported, but should be immediately
extensible to SESANS data.
Any number of SASdata groups may be present in a SASentry and the data
within each SASdata group can be a single 1D I(Q), multi-framed 1D I(Q),
2D I(Qx, Qy) or multi-framed 2D I(Qx, Qy).
:Dependencies:
The NXcanSAS HDF5 reader requires h5py => v2.5.0 or later.
"""
# CanSAS version
cansas_version = 2.0
# Data type name
type_name = "NXcanSAS"
# Wildcards
type = ["NXcanSAS HDF5 Files (*.h5)|*.h5|"]
# List of allowed extensions
ext = ['.h5', '.H5']
# Flag to bypass extension check
allow_all = True
def get_file_contents(self):
"""
This is the general read method that all SasView data_loaders must have.
:param filename: A path for an HDF5 formatted CanSAS 2D data file.
:return: List of Data1D/2D objects and/or a list of errors.
"""
# Reinitialize when loading a new data file to reset all class variables
self.reset_state()
filename = self.f_open.name
self.f_open.close() # IO handled by h5py
# Check that the file exists
if os.path.isfile(filename):
basename = os.path.basename(filename)
_, extension = os.path.splitext(basename)
# If the file type is not allowed, return empty list
if extension in self.ext or self.allow_all:
# Load the data file
try:
self.raw_data = h5py.File(filename, 'r')
except Exception as exc:
if extension not in self.ext:
msg = "NXcanSAS Reader could not load file {}".format(
basename + extension)
raise DefaultReaderException(msg)
raise FileContentsException(exc)
try:
# Read in all child elements of top level SASroot
self.read_children(self.raw_data, [])
# Add the last data set to the list of outputs
self.add_data_set()
except Exception as exc:
raise FileContentsException(exc)
finally:
# Close the data file
self.raw_data.close()
for data_set in self.output:
if isinstance(data_set, Data1D):
if data_set.x.size < 5:
exception = FileContentsException(
"Fewer than 5 data points found.")
data_set.errors.append(exception)
def reset_state(self):
"""
Create the reader object and define initial states for class variables
"""
super(Reader, self).reset_state()
self.data1d = []
self.data2d = []
self.raw_data = None
self.multi_frame = False
self.data_frames = []
self.data_uncertainty_frames = []
self.errors = []
self.logging = []
self.q_names = []
self.mask_name = u''
self.i_name = u''
self.i_node = u''
self.i_uncertainties_name = u''
self.q_uncertainty_names = []
self.q_resolution_names = []
self.parent_class = u''
self.detector = Detector()
self.collimation = Collimation()
self.aperture = Aperture()
self.process = Process()
self.trans_spectrum = TransmissionSpectrum()
def read_children(self, data, parent_list):
"""
A recursive method for stepping through the hierarchical data file.
:param data: h5py Group object of any kind
:param parent: h5py Group parent name
"""
# Loop through each element of the parent and process accordingly
for key in data.keys():
# Get all information for the current key
value = data.get(key)
class_name = h5attr(value, u'canSAS_class')
if isinstance(class_name, (list, tuple, np.ndarray)):
class_name = class_name[0]
if class_name is None:
class_name = h5attr(value, u'NX_class')
if class_name is not None:
class_prog = re.compile(class_name)
else:
class_prog = re.compile(value.name)
if isinstance(value, h5py.Group):
# Set parent class before recursion
last_parent_class = self.parent_class
self.parent_class = class_name
parent_list.append(key)
# If a new sasentry, store the current data sets and create
# a fresh Data1D/2D object
if class_prog.match(u'SASentry'):
self.add_data_set(key)
elif class_prog.match(u'SASdata'):
self._find_data_attributes(value)
self._initialize_new_data_set(value)
# Recursion step to access data within the group
try:
self.read_children(value, parent_list)
self.add_intermediate()
except Exception as e:
self.current_datainfo.errors.append(str(e))
logger.debug(traceback.format_exc())
# Reset parent class when returning from recursive method
self.parent_class = last_parent_class
parent_list.remove(key)
elif isinstance(value, h5py.Dataset):
# If this is a dataset, store the data appropriately
data_set = value[()]
unit = self._get_unit(value)
# Put scalars into lists to be sure they are iterable
if np.isscalar(data_set):
data_set = [data_set]
for data_point in data_set:
if isinstance(data_point, np.ndarray):
if data_point.dtype.char == 'S':
data_point = decode(bytes(data_point))
else:
data_point = decode(data_point)
# Top Level Meta Data
if key == u'definition':
if isinstance(data_set, basestring):
self.current_datainfo.meta_data['reader'] = data_set
break
else:
self.current_datainfo.meta_data[
'reader'] = data_point
# Run
elif key == u'run':
try:
run_name = h5attr(value, 'name', default='name')
run_dict = {data_point: run_name}
self.current_datainfo.run_name = run_dict
except Exception:
pass
if isinstance(data_set, basestring):
self.current_datainfo.run.append(data_set)
break
else:
self.current_datainfo.run.append(data_point)
# Title
elif key == u'title':
if isinstance(data_set, basestring):
self.current_datainfo.title = data_set
break
else:
self.current_datainfo.title = data_point
# Note
elif key == u'SASnote':
self.current_datainfo.notes.append(data_set)
break
# Sample Information
elif self.parent_class == u'SASsample':
self.process_sample(data_point, key)
# Instrumental Information
elif (key == u'name'
and self.parent_class == u'SASinstrument'):
self.current_datainfo.instrument = data_point
# Detector
elif self.parent_class == u'SASdetector':
self.process_detector(data_point, key, unit)
# Collimation
elif self.parent_class == u'SAScollimation':
self.process_collimation(data_point, key, unit)
# Aperture
elif self.parent_class == u'SASaperture':
self.process_aperture(data_point, key)
# Process Information
elif self.parent_class == u'SASprocess': # CanSAS 2.0
self.process_process(data_point, key)
# Source
elif self.parent_class == u'SASsource':
self.process_source(data_point, key, unit)
                    # Data
                    elif self.parent_class == u'SASdata':
if isinstance(self.current_dataset, plottable_2D):
self.process_2d_data_object(data_set, key, unit)
else:
self.process_1d_data_object(data_set, key, unit)
break
elif self.parent_class == u'SAStransmission_spectrum':
self.process_trans_spectrum(data_set, key)
break
                    # Everything else goes in meta_data
                    else:
new_key = self._create_unique_key(
self.current_datainfo.meta_data, key)
self.current_datainfo.meta_data[new_key] = data_point
else:
                # It is unclear whether this code is reachable
self.errors.append("ShouldNeverHappenException")
def process_1d_data_object(self, data_set, key, unit):
"""
SASdata processor method for 1d data items
:param data_set: data from HDF5 file
:param key: canSAS_class attribute
:param unit: unit attribute
"""
if key == self.i_name:
if self.multi_frame:
for x in range(0, data_set.shape[0]):
self.data_frames.append(data_set[x].flatten())
else:
self.current_dataset.y = data_set.flatten()
self.current_dataset.yaxis("Intensity", unit)
elif key == self.i_uncertainties_name:
if self.multi_frame:
for x in range(0, data_set.shape[0]):
self.data_uncertainty_frames.append(data_set[x].flatten())
self.current_dataset.dy = data_set.flatten()
elif key in self.q_names:
self.current_dataset.xaxis("Q", unit)
self.current_dataset.x = data_set.flatten()
elif key in self.q_resolution_names:
if (len(self.q_resolution_names) > 1
and np.where(self.q_resolution_names == key)[0] == 0):
self.current_dataset.dxw = data_set.flatten()
elif (len(self.q_resolution_names) > 1
and np.where(self.q_resolution_names == key)[0] == 1):
self.current_dataset.dxl = data_set.flatten()
else:
self.current_dataset.dx = data_set.flatten()
elif key in self.q_uncertainty_names:
if (len(self.q_uncertainty_names) > 1
and np.where(self.q_uncertainty_names == key)[0] == 0):
self.current_dataset.dxw = data_set.flatten()
elif (len(self.q_uncertainty_names) > 1
and np.where(self.q_uncertainty_names == key)[0] == 1):
self.current_dataset.dxl = data_set.flatten()
else:
self.current_dataset.dx = data_set.flatten()
elif key == self.mask_name:
self.current_dataset.mask = data_set.flatten()
elif key == u'wavelength':
self.current_datainfo.source.wavelength = data_set[0]
self.current_datainfo.source.wavelength_unit = unit
def process_2d_data_object(self, data_set, key, unit):
if key == self.i_name:
self.current_dataset.data = data_set
self.current_dataset.zaxis("Intensity", unit)
elif key == self.i_uncertainties_name:
self.current_dataset.err_data = data_set.flatten()
elif key in self.q_names:
self.current_dataset.xaxis("Q_x", unit)
self.current_dataset.yaxis("Q_y", unit)
if self.q_names[0] == self.q_names[1]:
# All q data in a single array
self.current_dataset.qx_data = data_set[0]
self.current_dataset.qy_data = data_set[1]
elif self.q_names.index(key) == 0:
self.current_dataset.qx_data = data_set
elif self.q_names.index(key) == 1:
self.current_dataset.qy_data = data_set
elif key in self.q_resolution_names:
if (len(self.q_resolution_names) == 1
or (self.q_resolution_names[0]
== self.q_resolution_names[1])):
self.current_dataset.dqx_data = data_set[0].flatten()
self.current_dataset.dqy_data = data_set[1].flatten()
elif self.q_resolution_names[0] == key:
self.current_dataset.dqx_data = data_set.flatten()
else:
self.current_dataset.dqy_data = data_set.flatten()
elif key in self.q_uncertainty_names:
if (len(self.q_uncertainty_names) == 1
or (self.q_uncertainty_names[0]
== self.q_uncertainty_names[1])):
self.current_dataset.dqx_data = data_set[0].flatten()
self.current_dataset.dqy_data = data_set[1].flatten()
elif self.q_uncertainty_names[0] == key:
self.current_dataset.dqx_data = data_set.flatten()
else:
self.current_dataset.dqy_data = data_set.flatten()
elif key == self.mask_name:
self.current_dataset.mask = data_set.flatten()
elif key == u'Qy':
self.current_dataset.yaxis("Q_y", unit)
self.current_dataset.qy_data = data_set.flatten()
elif key == u'Qydev':
self.current_dataset.dqy_data = data_set.flatten()
elif key == u'Qx':
self.current_dataset.xaxis("Q_x", unit)
self.current_dataset.qx_data = data_set.flatten()
elif key == u'Qxdev':
self.current_dataset.dqx_data = data_set.flatten()
def process_trans_spectrum(self, data_set, key):
"""
SAStransmission_spectrum processor
:param data_set: data from HDF5 file
:param key: canSAS_class attribute
"""
if key == u'T':
self.trans_spectrum.transmission = data_set.flatten()
elif key == u'Tdev':
self.trans_spectrum.transmission_deviation = data_set.flatten()
elif key == u'lambda':
self.trans_spectrum.wavelength = data_set.flatten()
def process_sample(self, data_point, key):
"""
SASsample processor
:param data_point: Single point from an HDF5 data file
:param key: class name data_point was taken from
"""
if key == u'Title':
self.current_datainfo.sample.name = data_point
elif key == u'name':
self.current_datainfo.sample.name = data_point
elif key == u'ID':
self.current_datainfo.sample.name = data_point
elif key == u'thickness':
self.current_datainfo.sample.thickness = data_point
elif key == u'temperature':
self.current_datainfo.sample.temperature = data_point
elif key == u'transmission':
self.current_datainfo.sample.transmission = data_point
elif key == u'x_position':
self.current_datainfo.sample.position.x = data_point
elif key == u'y_position':
self.current_datainfo.sample.position.y = data_point
elif key == u'pitch':
self.current_datainfo.sample.orientation.x = data_point
elif key == u'yaw':
self.current_datainfo.sample.orientation.y = data_point
elif key == u'roll':
self.current_datainfo.sample.orientation.z = data_point
elif key == u'details':
self.current_datainfo.sample.details.append(data_point)
def process_detector(self, data_point, key, unit):
"""
SASdetector processor
:param data_point: Single point from an HDF5 data file
:param key: class name data_point was taken from
:param unit: unit attribute from data set
"""
if key == u'name':
self.detector.name = data_point
elif key == u'SDD':
self.detector.distance = float(data_point)
self.detector.distance_unit = unit
elif key == u'slit_length':
self.detector.slit_length = float(data_point)
self.detector.slit_length_unit = unit
elif key == u'x_position':
self.detector.offset.x = float(data_point)
self.detector.offset_unit = unit
elif key == u'y_position':
self.detector.offset.y = float(data_point)
self.detector.offset_unit = unit
elif key == u'pitch':
self.detector.orientation.x = float(data_point)
self.detector.orientation_unit = unit
elif key == u'roll':
self.detector.orientation.z = float(data_point)
self.detector.orientation_unit = unit
elif key == u'yaw':
self.detector.orientation.y = float(data_point)
self.detector.orientation_unit = unit
elif key == u'beam_center_x':
self.detector.beam_center.x = float(data_point)
self.detector.beam_center_unit = unit
elif key == u'beam_center_y':
self.detector.beam_center.y = float(data_point)
self.detector.beam_center_unit = unit
elif key == u'x_pixel_size':
self.detector.pixel_size.x = float(data_point)
self.detector.pixel_size_unit = unit
elif key == u'y_pixel_size':
self.detector.pixel_size.y = float(data_point)
self.detector.pixel_size_unit = unit
def process_collimation(self, data_point, key, unit):
"""
SAScollimation processor
:param data_point: Single point from an HDF5 data file
:param key: class name data_point was taken from
:param unit: unit attribute from data set
"""
if key == u'distance':
self.collimation.length = data_point
self.collimation.length_unit = unit
elif key == u'name':
self.collimation.name = data_point
def process_aperture(self, data_point, key):
"""
SASaperture processor
:param data_point: Single point from an HDF5 data file
:param key: class name data_point was taken from
"""
if key == u'shape':
self.aperture.shape = data_point
elif key == u'x_gap':
self.aperture.size.x = data_point
elif key == u'y_gap':
self.aperture.size.y = data_point
def process_source(self, data_point, key, unit):
"""
SASsource processor
:param data_point: Single point from an HDF5 data file
:param key: class name data_point was taken from
:param unit: unit attribute from data set
"""
if key == u'incident_wavelength':
self.current_datainfo.source.wavelength = data_point
self.current_datainfo.source.wavelength_unit = unit
elif key == u'wavelength_max':
self.current_datainfo.source.wavelength_max = data_point
self.current_datainfo.source.wavelength_max_unit = unit
elif key == u'wavelength_min':
self.current_datainfo.source.wavelength_min = data_point
self.current_datainfo.source.wavelength_min_unit = unit
elif key == u'incident_wavelength_spread':
self.current_datainfo.source.wavelength_spread = data_point
self.current_datainfo.source.wavelength_spread_unit = unit
elif key == u'beam_size_x':
self.current_datainfo.source.beam_size.x = data_point
self.current_datainfo.source.beam_size_unit = unit
elif key == u'beam_size_y':
self.current_datainfo.source.beam_size.y = data_point
self.current_datainfo.source.beam_size_unit = unit
elif key == u'beam_shape':
self.current_datainfo.source.beam_shape = data_point
elif key == u'radiation':
self.current_datainfo.source.radiation = data_point
elif key == u'type':
self.current_datainfo.source.type = data_point
elif key == u'probe':
self.current_datainfo.source.probe = data_point
def process_process(self, data_point, key):
"""
SASprocess processor
:param data_point: Single point from an HDF5 data file
:param key: class name data_point was taken from
"""
term_match = re.compile(u'^term[0-9]+$')
if key == u'Title': # CanSAS 2.0
self.process.name = data_point
elif key == u'name': # NXcanSAS
self.process.name = data_point
elif key == u'description':
self.process.description = data_point
elif key == u'date':
self.process.date = data_point
elif term_match.match(key):
self.process.term.append(data_point)
else:
self.process.notes.append(data_point)
    def add_intermediate(self):
        """
        This method stores any intermediate objects within the final data set
        after fully reading the set. The NXclass name of the h5py Group that
        just finished processing is taken from self.parent_class.
        """
if self.parent_class == u'SASprocess':
self.current_datainfo.process.append(self.process)
self.process = Process()
elif self.parent_class == u'SASdetector':
self.current_datainfo.detector.append(self.detector)
self.detector = Detector()
elif self.parent_class == u'SAStransmission_spectrum':
self.current_datainfo.trans_spectrum.append(self.trans_spectrum)
self.trans_spectrum = TransmissionSpectrum()
elif self.parent_class == u'SAScollimation':
self.current_datainfo.collimation.append(self.collimation)
self.collimation = Collimation()
elif self.parent_class == u'SASaperture':
self.collimation.aperture.append(self.aperture)
self.aperture = Aperture()
elif self.parent_class == u'SASdata':
if isinstance(self.current_dataset, plottable_2D):
self.data2d.append(self.current_dataset)
elif isinstance(self.current_dataset, plottable_1D):
if self.multi_frame:
for x in range(0, len(self.data_frames)):
self.current_dataset.y = self.data_frames[x]
if len(self.data_uncertainty_frames) > x:
self.current_dataset.dy = \
self.data_uncertainty_frames[x]
self.data1d.append(self.current_dataset)
else:
self.data1d.append(self.current_dataset)
def final_data_cleanup(self):
"""
Does some final cleanup and formatting on self.current_datainfo and
all data1D and data2D objects and then combines the data and info into
Data1D and Data2D objects
"""
# Type cast data arrays to float64
if len(self.current_datainfo.trans_spectrum) > 0:
spectrum_list = []
for spectrum in self.current_datainfo.trans_spectrum:
spectrum.transmission = spectrum.transmission.astype(np.float64)
spectrum.transmission_deviation = \
spectrum.transmission_deviation.astype(np.float64)
spectrum.wavelength = spectrum.wavelength.astype(np.float64)
if len(spectrum.transmission) > 0:
spectrum_list.append(spectrum)
self.current_datainfo.trans_spectrum = spectrum_list
# Append errors to dataset and reset class errors
self.current_datainfo.errors = self.errors
self.errors = []
# Combine all plottables with datainfo and append each to output
# Type cast data arrays to float64 and find min/max as appropriate
for dataset in self.data2d:
# Calculate the actual Q matrix
try:
if dataset.q_data.size <= 1:
dataset.q_data = np.sqrt(dataset.qx_data
* dataset.qx_data
+ dataset.qy_data
* dataset.qy_data).flatten()
            except Exception:
dataset.q_data = None
if dataset.data.ndim == 2:
dataset.y_bins = np.unique(dataset.qy_data.flatten())
dataset.x_bins = np.unique(dataset.qx_data.flatten())
dataset.data = dataset.data.flatten()
dataset.qx_data = dataset.qx_data.flatten()
dataset.qy_data = dataset.qy_data.flatten()
try:
iter(dataset.mask)
dataset.mask = np.invert(np.asarray(dataset.mask, dtype=bool))
except TypeError:
dataset.mask = np.ones(dataset.data.shape, dtype=bool)
self.current_dataset = dataset
self.send_to_output()
for dataset in self.data1d:
self.current_dataset = dataset
self.send_to_output()
def add_data_set(self, key=""):
"""
        Adds the current_dataset to the list of outputs after performing final
processing on the data and then calls a private method to generate a
new data set.
:param key: NeXus group name for current tree level
"""
if self.current_datainfo and self.current_dataset:
self.final_data_cleanup()
self.data_frames = []
self.data_uncertainty_frames = []
self.data1d = []
self.data2d = []
self.current_datainfo = DataInfo()
def _initialize_new_data_set(self, value=None):
"""
A private class method to generate a new 1D or 2D data object based on
the type of data within the set. Outside methods should call
add_data_set() to be sure any existing data is stored properly.
        :param value: SASdata/NXdata HDF5 Group used to determine dimensionality
"""
if self._is_2d_not_multi_frame(value):
self.current_dataset = plottable_2D()
else:
x = np.array(0)
y = np.array(0)
self.current_dataset = plottable_1D(x, y)
self.current_datainfo.filename = self.raw_data.filename
@staticmethod
def as_list_or_array(iterable):
"""
Return value as a list if not already a list or array.
:param iterable:
:return:
"""
        if not isinstance(iterable, (np.ndarray, list)):
            iterable = iterable.split(",") if isinstance(iterable, str)\
                else [iterable]
return iterable
def _find_data_attributes(self, value):
"""
        A method to find the indices for Q, the names of the Qdev and Idev,
the name of the mask.
:param value: SASdata/NXdata HDF5 Group
"""
# Initialize values to base types
self.mask_name = u''
self.i_name = u''
self.i_node = u''
self.i_uncertainties_name = u''
self.q_names = []
self.q_uncertainty_names = []
self.q_resolution_names = []
# Get attributes
signal = h5attr(value, "signal", "I")
i_axes = h5attr(value, "I_axes", ["Q"])
q_indices = h5attr(value, "Q_indices", [0])
i_axes = self.as_list_or_array(i_axes)
keys = value.keys()
# Assign attributes to appropriate class variables
self.q_names = [i_axes[int(v)] for v in self.as_list_or_array(q_indices)]
self.mask_name = h5attr(value, "mask")
self.i_name = signal
self.i_node = value.get(self.i_name)
for item in self.q_names:
if item in keys:
q_vals = value.get(item)
uncertainties = h5attr(q_vals, "uncertainties")
if uncertainties is None:
uncertainties = h5attr(q_vals, "uncertainty")
                if isinstance(uncertainties, str):
uncertainties = uncertainties.split(",")
if uncertainties is not None:
self.q_uncertainty_names = uncertainties
resolutions = h5attr(q_vals, "resolutions")
                if isinstance(resolutions, str):
resolutions = resolutions.split(",")
if resolutions is not None:
self.q_resolution_names = resolutions
if self.i_name in keys:
i_vals = value.get(self.i_name)
self.i_uncertainties_name = h5attr(i_vals, "uncertainties")
if self.i_uncertainties_name is None:
self.i_uncertainties_name = h5attr(i_vals, "uncertainty")
def _is_2d_not_multi_frame(self, value, i_base="", q_base=""):
"""
        A private method to determine whether the data set is 1D or 2D.
        :param value: NeXus/NXcanSAS data group
        :param i_base: Name of the intensity entry (defaults to self.i_name)
        :param q_base: Names of the Q entries (defaults to self.q_names)
        :return: True if 2D, otherwise False
"""
i_basename = i_base if i_base != "" else self.i_name
i_vals = value.get(i_basename)
q_basename = q_base if q_base != "" else self.q_names
q_vals = value.get(q_basename[0])
self.multi_frame = (i_vals is not None and q_vals is not None
and len(i_vals.shape) != 1
and len(q_vals.shape) == 1)
return (i_vals is not None and len(i_vals.shape) != 1
and not self.multi_frame)
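    # Shape logic sketch (my reading of the checks above): with I of shape
    # (n_frames, n_q) and Q of shape (n_q,), the set is a stack of 1D frames
    # (multi_frame=True, method returns False); with I and Q both
    # multi-dimensional it is a genuine 2D data set (returns True); with a
    # one-dimensional I it is plain 1D data (returns False).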
def _create_unique_key(self, dictionary, name, numb=0):
"""
        Create a unique key value for any dictionary to prevent overwriting.
        Recurses until a unique key value is found.
:param dictionary: A dictionary with any number of entries
:param name: The index of the item to be added to dictionary
:param numb: The number to be appended to the name, starts at 0
:return: The new name for the dictionary entry
"""
if dictionary.get(name) is not None:
numb += 1
name = name.split("_")[0]
name += "_{0}".format(numb)
name = self._create_unique_key(dictionary, name, numb)
return name
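    # Example of the recursion above: if 'note' is already a key,
    # _create_unique_key(d, 'note') yields 'note_1'; if 'note_1' also
    # exists it recurses to 'note_2', and so on.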
def _get_unit(self, value):
"""
Find the unit for a particular value within the h5py dictionary
:param value: attribute dictionary for a particular value set
:return: unit for the value passed to the method
"""
unit = h5attr(value, u'units')
if unit is None:
unit = h5attr(value, u'unit')
return unit
def write(self, filename, dataset):
"""
Export data in NXcanSAS format
:param filename: File path where the data will be saved
:param dataset: DataInfo object that will be converted to NXcanSAS
:return: None
"""
from sas.sascalc.file_converter.nxcansas_writer import NXcanSASWriter
writer = NXcanSASWriter()
writer.write(dataset=[dataset], filename=filename)
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py ----
import logging
import numpy as np
from sas.sascalc.data_util.nxsunit import Converter
from ..file_reader_base_class import FileReader
from ..data_info import DataInfo, plottable_1D, Data1D, Detector
from ..loader_exceptions import FileContentsException, DefaultReaderException
logger = logging.getLogger(__name__)
class Reader(FileReader):
"""
Class to load IGOR reduced .ABS files
"""
# File type
type_name = "IGOR 1D"
# Wildcards
type = ["IGOR 1D files (*.abs)|*.abs", "IGOR 1D USANS files (*.cor)|*.cor"]
# List of allowed extensions
ext = ['.abs', '.cor']
def get_file_contents(self):
"""
Get the contents of the file
:raise RuntimeError: when the file can't be opened
        :raise ValueError: when the lengths of the data vectors are inconsistent
"""
buff = self.readall()
filepath = self.f_open.name
lines = buff.splitlines()
self.output = []
self.current_datainfo = DataInfo()
self.current_datainfo.filename = filepath
detector = Detector()
data_line = 0
x_index = 4
self.reset_data_list(len(lines))
self.current_datainfo.detector.append(detector)
self.current_datainfo.filename = filepath
is_info = False
is_center = False
is_data_started = False
base_q_unit = '1/A'
base_i_unit = '1/cm'
data_conv_q = Converter(base_q_unit)
data_conv_i = Converter(base_i_unit)
for line in lines:
# Information line 1
if line.find(".bt5") > 0:
x_index = 0
if is_info:
is_info = False
line_toks = line.split()
# Wavelength in Angstrom
try:
value = float(line_toks[1])
if self.current_datainfo.source.wavelength_unit != 'A':
conv = Converter('A')
self.current_datainfo.source.wavelength = conv(value,
units=self.current_datainfo.source.wavelength_unit)
else:
self.current_datainfo.source.wavelength = value
                except (IndexError, ValueError):
msg = "ABSReader cannot read wavelength from %s" % filepath
self.current_datainfo.errors.append(msg)
# Detector distance in meters
try:
value = float(line_toks[3])
if detector.distance_unit != 'm':
conv = Converter('m')
detector.distance = conv(value,
units=detector.distance_unit)
else:
detector.distance = value
except Exception:
msg = "ABSReader cannot read SDD from %s" % filepath
self.current_datainfo.errors.append(msg)
# Transmission
try:
self.current_datainfo.sample.transmission = \
float(line_toks[4])
except ValueError:
# Transmission isn't always in the header
pass
# Sample thickness in mm
try:
# ABS writer adds 'C' with no space to the end of the
# thickness column. Remove it if it is there before
# converting the thickness.
                    if line_toks[5][-1] not in '0123456789.':
value = float(line_toks[5][:-1])
else:
value = float(line_toks[5])
if self.current_datainfo.sample.thickness_unit != 'cm':
conv = Converter('cm')
self.current_datainfo.sample.thickness = conv(value,
units=self.current_datainfo.sample.thickness_unit)
else:
self.current_datainfo.sample.thickness = value
except ValueError:
# Thickness is not a mandatory entry
pass
# MON CNT LAMBDA DET ANG DET DIST TRANS THICK AVE STEP
if line.count("LAMBDA") > 0:
is_info = True
# Find center info line
if is_center:
is_center = False
line_toks = line.split()
# Center in bin number
center_x = float(line_toks[0])
center_y = float(line_toks[1])
# Bin size
if detector.pixel_size_unit != 'mm':
conv = Converter('mm')
detector.pixel_size.x = conv(5.08,
units=detector.pixel_size_unit)
detector.pixel_size.y = conv(5.08,
units=detector.pixel_size_unit)
else:
detector.pixel_size.x = 5.08
detector.pixel_size.y = 5.08
# Store beam center in distance units
# Det 640 x 640 mm
if detector.beam_center_unit != 'mm':
conv = Converter('mm')
detector.beam_center.x = conv(center_x * 5.08,
units=detector.beam_center_unit)
detector.beam_center.y = conv(center_y * 5.08,
units=detector.beam_center_unit)
else:
detector.beam_center.x = center_x * 5.08
detector.beam_center.y = center_y * 5.08
# Detector type
try:
detector.name = line_toks[7]
                except IndexError:
# Detector name is not a mandatory entry
pass
# BCENT(X,Y) A1(mm) A2(mm) A1A2DIST(m) DL/L BSTOP(mm) DET_TYP
if line.count("BCENT") > 0:
is_center = True
# Parse the data
if is_data_started:
toks = line.split()
try:
_x = float(toks[x_index])
_y = float(toks[1])
_dy = float(toks[2])
_dx = float(toks[3])
if data_conv_q is not None:
_x = data_conv_q(_x, units=base_q_unit)
_dx = data_conv_q(_dx, units=base_q_unit)
if data_conv_i is not None:
_y = data_conv_i(_y, units=base_i_unit)
_dy = data_conv_i(_dy, units=base_i_unit)
self.current_dataset.x[data_line] = _x
self.current_dataset.y[data_line] = _y
self.current_dataset.dy[data_line] = _dy
if _dx > 0:
self.current_dataset.dx[data_line] = _dx
else:
if data_line == 0:
self.current_dataset.dx = None
self.current_dataset.dxl = np.zeros(len(lines))
self.current_dataset.dxw = np.zeros(len(lines))
self.current_dataset.dxl[data_line] = abs(_dx)
self.current_dataset.dxw[data_line] = 0
data_line += 1
except ValueError:
# Could not read this data line. If we are here
# it is because we are in the data section. Just
# skip it.
pass
# SANS Data:
# The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev.
# I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor|
# USANS Data:
# EMP LEVEL: <value> ; BKG LEVEL: <value>
if line.startswith("The 6 columns") or line.startswith("EMP LEVEL"):
is_data_started = True
self.remove_empty_q_values()
# Sanity check
        if len(self.current_dataset.y) != len(self.current_dataset.dy):
self.set_all_to_none()
msg = "abs_reader: y and dy have different length"
raise ValueError(msg)
# If the data length is zero, consider this as
# though we were not able to read the file.
if len(self.current_dataset.x) == 0:
self.set_all_to_none()
raise ValueError("ascii_reader: could not load file")
self.current_dataset = self.set_default_1d_units(self.current_dataset)
if data_conv_q is not None:
self.current_dataset.xaxis("\\rm{Q}", base_q_unit)
if data_conv_i is not None:
self.current_dataset.yaxis("\\rm{Intensity}", base_i_unit)
# Store loading process information
self.current_datainfo.meta_data['loader'] = self.type_name
self.send_to_output()
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/dataloader/readers/abs_reader.py ----
import warnings
import numpy as np
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.fftpack import dct
from scipy.signal import argrelextrema
from numpy.linalg import lstsq
from sas.sascalc.dataloader.data_info import Data1D
from sas.sascalc.corfunc.transform_thread import FourierThread
from sas.sascalc.corfunc.transform_thread import HilbertThread
class CorfuncCalculator(object):
class _Interpolator(object):
"""
Interpolates between curve f and curve g over the range start:stop and
caches the result of the function when it's called
:param f: The first curve to interpolate
:param g: The second curve to interpolate
:param start: The value at which to start the interpolation
:param stop: The value at which to stop the interpolation
"""
def __init__(self, f, g, start, stop):
self.f = f
self.g = g
self.start = start
self.stop = stop
self._lastx = np.empty(0, dtype='d')
self._lasty = None
def __call__(self, x):
# If input is a single number, evaluate the function at that number
# and return a single number
if isinstance(x, (float, int)):
return self._smoothed_function(np.array([x]))[0]
            # If input is a list and differs from the last input, evaluate
            # the function at each point. If the input is the same as the
            # last time the function was called, return the cached result
            # instead of explicitly evaluating the function again.
if not np.array_equal(x, self._lastx):
self._lastx, self._lasty = x, self._smoothed_function(x)
return self._lasty
        def _smoothed_function(self, x):
ys = np.zeros(x.shape)
ys[x <= self.start] = self.f(x[x <= self.start])
ys[x >= self.stop] = self.g(x[x >= self.stop])
with warnings.catch_warnings():
# Ignore divide by zero error
warnings.simplefilter('ignore')
h = 1/(1+(x-self.stop)**2/(self.start-x)**2)
mask = np.logical_and(x > self.start, x < self.stop)
ys[mask] = h[mask]*self.g(x[mask])+(1-h[mask])*self.f(x[mask])
return ys
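    # A minimal usage sketch for _Interpolator (the curves f and g below are
    # hypothetical, not part of this module): blend a Guinier-like curve into
    # a Porod-like tail over 0.01 < q < 0.05 using the sigmoidal weight h
    # defined above.
    #
    #     f = lambda q: np.exp(-q**2 / 3.0)    # low-q side
    #     g = lambda q: 1e-8 * q**-4           # high-q side
    #     s = CorfuncCalculator._Interpolator(f, g, 0.01, 0.05)
    #     y = s(np.linspace(0.005, 0.1, 200))  # repeat calls with the same
    #                                          # array hit the cache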
def __init__(self, data=None, lowerq=None, upperq=None, scale=1):
"""
Initialize the class.
:param data: Data of the type DataLoader.Data1D
:param lowerq: The Q value to use as the boundary for
Guinier extrapolation
:param upperq: A tuple of the form (lower, upper).
Values between lower and upper will be used for Porod extrapolation
:param scale: Scaling factor for I(q)
"""
self._data = None
self.set_data(data, scale)
self.lowerq = lowerq
self.upperq = upperq
self.background = self.compute_background()
self._transform_thread = None
def set_data(self, data, scale=1):
"""
Prepares the data for analysis
:return: new_data = data * scale - background
"""
if data is None:
return
# Only process data of the class Data1D
if not issubclass(data.__class__, Data1D):
raise ValueError("Correlation function cannot be computed with 2D data.")
# Prepare the data
new_data = Data1D(x=data.x, y=data.y)
new_data *= scale
# Ensure the errors are set correctly
if new_data.dy is None or len(new_data.x) != len(new_data.dy) or \
(min(new_data.dy) == 0 and max(new_data.dy) == 0):
new_data.dy = np.ones(len(new_data.x))
self._data = new_data
def compute_background(self, upperq=None):
"""
Compute the background level from the Porod region of the data
"""
if self._data is None: return 0
elif upperq is None and self.upperq is not None: upperq = self.upperq
elif upperq is None and self.upperq is None: return 0
q = self._data.x
mask = np.logical_and(q > upperq[0], q < upperq[1])
_, _, bg = self._fit_porod(q[mask], self._data.y[mask])
return bg
def compute_extrapolation(self):
"""
Extrapolate and interpolate scattering data
:return: The extrapolated data
"""
q = self._data.x
iq = self._data.y
params, s2 = self._fit_data(q, iq)
# Extrapolate to 100*Qmax in experimental data
qs = np.arange(0, q[-1]*100, (q[1]-q[0]))
iqs = s2(qs)
extrapolation = Data1D(qs, iqs)
return params, extrapolation, s2
def compute_transform(self, extrapolation, trans_type, background=None,
completefn=None, updatefn=None):
"""
Transform an extrapolated scattering curve into a correlation function.
:param extrapolation: The extrapolated data
:param background: The background value (if not provided, previously
calculated value will be used)
        :param trans_type: The transform to apply: 'fourier' or 'hilbert'
:param completefn: The function to call when the transform calculation
is complete
:param updatefn: The function to call to update the GUI with the status
of the transform calculation
:return: The transformed data
"""
if self._transform_thread is not None:
if self._transform_thread.isrunning(): return
if background is None: background = self.background
if trans_type == 'fourier':
self._transform_thread = FourierThread(self._data, extrapolation,
background, completefn=completefn,
updatefn=updatefn)
elif trans_type == 'hilbert':
self._transform_thread = HilbertThread(self._data, extrapolation,
background, completefn=completefn, updatefn=updatefn)
else:
            err = ("Incorrect transform type supplied, must be 'fourier'"
                   " or 'hilbert'")
raise ValueError(err)
self._transform_thread.queue()
def transform_isrunning(self):
if self._transform_thread is None: return False
return self._transform_thread.isrunning()
def stop_transform(self):
if self._transform_thread.isrunning():
self._transform_thread.stop()
def extract_parameters(self, transformed_data):
"""
Extract the interesting measurements from a correlation function
:param transformed_data: Fourier transformation of the extrapolated data
"""
# Calculate indexes of maxima and minima
x = transformed_data.x
y = transformed_data.y
maxs = argrelextrema(y, np.greater)[0]
mins = argrelextrema(y, np.less)[0]
# If there are no maxima, return None
if len(maxs) == 0:
return None
GammaMin = y[mins[0]] # The value at the first minimum
ddy = (y[:-2]+y[2:]-2*y[1:-1])/(x[2:]-x[:-2])**2 # 2nd derivative of y
dy = (y[2:]-y[:-2])/(x[2:]-x[:-2]) # 1st derivative of y
# Find where the second derivative goes to zero
zeros = argrelextrema(np.abs(ddy), np.less)[0]
# locate the first inflection point
linear_point = zeros[0]
# Try to calculate slope around linear_point using 80 data points
lower = linear_point - 40
upper = linear_point + 40
# If too few data points to the left, use linear_point*2 data points
if lower < 0:
lower = 0
upper = linear_point * 2
# If too few to right, use 2*(dy.size - linear_point) data points
        elif upper > len(dy):
            upper = len(dy)
            width = len(dy) - linear_point
            lower = linear_point - width
m = np.mean(dy[lower:upper]) # Linear slope
b = y[1:-1][linear_point]-m*x[1:-1][linear_point] # Linear intercept
Lc = (GammaMin-b)/m # Hard block thickness
# Find the data points where the graph is linear to within 1%
mask = np.where(np.abs((y-(m*x+b))/y) < 0.01)[0]
if len(mask) == 0: # Return garbage for bad fits
return { 'max': self._round_sig_figs(x[maxs[0]], 6) }
dtr = x[mask[0]] # Beginning of Linear Section
d0 = x[mask[-1]] # End of Linear Section
GammaMax = y[mask[-1]]
A = np.abs(GammaMin/GammaMax) # Normalized depth of minimum
params = {
'max': x[maxs[0]],
'dtr': dtr,
'Lc': Lc,
'd0': d0,
'A': A,
'fill': Lc/x[maxs[0]]
}
return params
def _porod(self, q, K, sigma, bg):
"""Equation for the Porod region of the data"""
return bg + (K*q**(-4))*np.exp(-q**2*sigma**2)
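    # The model above is the damped Porod law
    #     I(q) = bg + K * q^-4 * exp(-q^2 * sigma^2),
    # i.e. the classic q^-4 tail with sigma acting as an interface-width
    # damping term and bg as a flat background.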
def _fit_guinier(self, q, iq):
"""Fit the Guinier region of the curve"""
A = np.vstack([q**2, np.ones(q.shape)]).T
# CRUFT: numpy>=1.14.0 allows rcond=None for the following default
rcond = np.finfo(float).eps * max(A.shape)
return lstsq(A, np.log(iq), rcond=rcond)
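    # lstsq above solves ln I(q) ~ g[0]*q^2 + g[1] over the columns
    # [q^2, 1]; _fit_data later rebuilds the Guinier curve as
    # exp(g[1] + g[0]*q^2), so g[0] plays the role of -Rg^2/3 in the usual
    # Guinier form.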
def _fit_porod(self, q, iq):
"""Fit the Porod region of the curve"""
fitp = curve_fit(lambda q, k, sig, bg: self._porod(q, k, sig, bg)*q**2,
q, iq*q**2, bounds=([-np.inf, 0, -np.inf], [np.inf, np.inf, np.inf]))[0]
k, sigma, bg = fitp
return k, sigma, bg
def _fit_data(self, q, iq):
"""
Given a data set, extrapolate out to large q with Porod and
to q=0 with Guinier
"""
mask = np.logical_and(q > self.upperq[0], q < self.upperq[1])
# Returns an array where the 1st and 2nd elements are the values of k
# and sigma for the best-fit Porod function
k, sigma, _ = self._fit_porod(q[mask], iq[mask])
bg = self.background
# Smooths between the best-fit porod function and the data to produce a
# better fitting curve
data = interp1d(q, iq)
s1 = self._Interpolator(data,
lambda x: self._porod(x, k, sigma, bg), self.upperq[0], q[-1])
mask = np.logical_and(q < self.lowerq, 0 < q)
# Returns parameters for the best-fit Guinier function
g = self._fit_guinier(q[mask], iq[mask])[0]
# Smooths between the best-fit Guinier function and the Porod curve
s2 = self._Interpolator((lambda x: (np.exp(g[1]+g[0]*x**2))), s1, q[0],
self.lowerq)
params = {'A': g[1], 'B': g[0], 'K': k, 'sigma': sigma}
return params, s2
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/corfunc/corfunc_calculator.py ----
from __future__ import print_function
from sas.sascalc.dataloader.loader_exceptions import NoKnownLoaderException
class ExtensionRegistry(object):
"""
Associate a file loader with an extension.
Note that there may be multiple loaders for the same extension.
Example: ::
registry = ExtensionRegistry()
# Add an association by setting an element
registry['.zip'] = unzip
# Multiple extensions for one loader
registry['.tgz'] = untar
registry['.tar.gz'] = untar
# Generic extensions to use after trying more specific extensions;
# these will be checked after the more specific extensions fail.
registry['.gz'] = gunzip
# Multiple loaders for one extension
registry['.cx'] = cx1
registry['.cx'] = cx2
registry['.cx'] = cx3
# Show registered extensions
        print(registry.extensions())
# Can also register a format name for explicit control from caller
registry['cx3'] = cx3
        print(registry.formats())
# Retrieve loaders for a file name
registry.lookup('hello.cx') -> [cx3,cx2,cx1]
# Run loader on a filename
registry.load('hello.cx') ->
try:
return cx3('hello.cx')
except:
try:
return cx2('hello.cx')
except:
return cx1('hello.cx')
# Load in a specific format ignoring extension
registry.load('hello.cx',format='cx3') ->
return cx3('hello.cx')
"""
def __init__(self, **kw):
self.loaders = {}
def __setitem__(self, ext, loader):
if ext not in self.loaders:
self.loaders[ext] = []
self.loaders[ext].insert(0, loader)
def __getitem__(self, ext):
return self.loaders[ext]
def __contains__(self, ext):
return ext in self.loaders
def formats(self):
"""
Return a sorted list of the registered formats.
"""
names = [a for a in self.loaders.keys() if not a.startswith('.')]
names.sort()
return names
def extensions(self):
"""
Return a sorted list of registered extensions.
"""
exts = [a for a in self.loaders.keys() if a.startswith('.')]
exts.sort()
return exts
def lookup(self, path):
"""
Return the loader associated with the file type of path.
:param path: Data file path
:return: List of available readers for the file extension (maybe empty)
"""
# Find matching lower-case extensions
path_lower = path.lower()
extlist = [ext for ext in self.extensions() if path_lower.endswith(ext)]
        # Sort matching extensions by decreasing length so that more
        # specific extensions are tried first
        extlist.sort(key=len, reverse=True)
# Combine loaders for matching extensions into one big list
loaders = []
for L in [self.loaders[ext] for ext in extlist]:
loaders.extend(L)
# Remove duplicates if they exist
if len(loaders) != len(set(loaders)):
result = []
for L in loaders:
if L not in result:
result.append(L)
loaders = result
return loaders
def load(self, path, format=None):
"""
Call the loader for the file type of path.
Raises an exception if the loader fails or if no loaders are defined
for the given path or format.
"""
if format is None:
loaders = self.lookup(path)
if not loaders:
raise NoKnownLoaderException("No loaders match extension in %r"
% path)
else:
loaders = self.loaders.get(format.lower(), [])
if not loaders:
raise NoKnownLoaderException("No loaders match format %r"
% format)
last_exc = None
for fn in loaders:
try:
return fn(path)
except Exception as e:
last_exc = e
pass # give other loaders a chance to succeed
# If we get here it is because all loaders failed
raise last_exc
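# A minimal usage sketch (the loader callables load_abs and load_nxcansas
# are hypothetical, not part of this module); later registrations for the
# same extension are tried first:
#
#     registry = ExtensionRegistry()
#     registry['.abs'] = load_abs
#     registry['.h5'] = load_nxcansas
#     data = registry.load('run42.h5')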
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/data_util/registry.py ----
import os
import numpy as np
from ..dataloader.data_info import Data2D
class BSLParsingError(Exception):
pass
class BSLLoader:
"""
Loads 2D SAS data from a BSL file.
CLoader is a C extension (found in c_ext/bsl_loader.c)
See http://www.diamond.ac.uk/Beamlines/Soft-Condensed-Matter/small-angle/SAXS-Software/CCP13/BSL.html
for more info on the BSL file format.
"""
def __init__(self, filename):
"""
        Parses the BSL header file and sets instance variables appropriately
:param filename: Path to the BSL header file
"""
[folder, filename] = os.path.split(filename)
sasdata_filename = filename.replace('000.', '001.')
if sasdata_filename == filename:
err_msg = ("Invalid header filename {}.\nShould be of the format "
"Xnn000.XXX where X is any alphanumeric character and n is any"
" digit.").format(filename)
raise BSLParsingError(err_msg)
data_info = {}
with open(os.path.join(folder, filename), 'r') as header_file:
data_info = self._parse_header(header_file, filename, sasdata_filename, folder)
self.filename = data_info['filename']
self.n_frames = data_info['frames']
self.n_pixels = data_info['pixels']
self.n_rasters = data_info['rasters']
self.swap_bytes = data_info['swap_bytes']
def _parse_header(self, header_file, filename, sasdata_filename, folder):
"""
        Parses the header file and returns the metadata in data_info
        :param header_file: header file object
        :param filename: name of the header file
        :param sasdata_filename: expected name of the first data file
        :param folder: directory containing the header and data files
        :return: metadata dict for the header file
"""
data_info = {}
# First 2 lines are headers
header_file.readline()
header_file.readline()
metadata = header_file.readline().strip()
metadata = metadata.split()
data_filename = header_file.readline().strip()
if len(metadata) != 10:
err_msg = "Invalid header file: {}".format(filename)
raise BSLParsingError(err_msg)
if data_filename != sasdata_filename:
last_file = (metadata[9] == '0')
if last_file: # Reached last file we have metadata for
err_msg = "No metadata for {} found in header file: {}"
err_msg = err_msg.format(sasdata_filename, filename)
raise BSLParsingError(err_msg)
try:
data_info = {
'filename': os.path.join(folder, data_filename),
'pixels': int(metadata[0]),
'rasters': int(metadata[1]),
'frames': int(metadata[2]),
'swap_bytes': int(metadata[3])
}
except Exception:
err_msg = "Invalid metadata in header file for {}"
err_msg = err_msg.format(sasdata_filename)
raise BSLParsingError(err_msg)
return data_info
@property
def filename(self):
"""
File to load.
"""
return self._filename
@filename.setter
def filename(self, filename):
self._filename = str(filename)
@property
def n_frames(self):
"""
Number of frames in the file.
"""
return self._n_frames
@n_frames.setter
def n_frames(self, n_frames):
self._n_frames = int(n_frames)
@property
def n_pixels(self):
"""
Number of pixels in the file.
"""
return self._n_pixels
@n_pixels.setter
def n_pixels(self, n_pixels):
self._n_pixels = int(n_pixels)
@property
def n_rasters(self):
"""
Number of rasters in the file.
"""
return self._n_rasters
@n_rasters.setter
def n_rasters(self, n_rasters):
self._n_rasters = int(n_rasters)
@property
def swap_bytes(self):
"""
Whether or not the bytes are in reverse order.
"""
return self._swap_bytes
@swap_bytes.setter
def swap_bytes(self, swap_bytes):
self._swap_bytes = bool(swap_bytes)
def load_frames(self, frames):
"""
        Loads the given frames of the BSL file into Data2D objects.
        :param frames: Iterable of frame indices to load.
        :return: List of Data2D objects, one per frame.
"""
frame_data = []
# Prepare axis values (arbitrary scale)
x = self.n_rasters * list(range(1, self.n_pixels+1))
y = [self.n_pixels * [i] for i in list(range(1, self.n_rasters+1))]
y = np.reshape(y, (1, self.n_pixels*self.n_rasters))[0]
x_bins = x[:self.n_pixels]
y_bins = y[0::self.n_pixels]
for frame in frames:
raw_frame_data = self.load_data(frame)
data2d = Data2D(data=raw_frame_data, qx_data=x, qy_data=y)
data2d.x_bins = x_bins
data2d.y_bins = y_bins
data2d.Q_unit = '' # Using arbitrary units
frame_data.append(data2d)
return frame_data
def load_data(self, frame):
"""
        Loads one frame from the file named in filename as 4-byte floats,
        in either little- or big-endian order depending on self.swap_bytes.
:param frame: The frame to load.
:return: np array of loaded floats.
"""
# Set dtype to 4 byte float, big or little endian depending on swap_bytes.
dtype = ('>f4', '<f4')[self.swap_bytes]
# Size of float as stored in binary file should be 4 bytes.
float_size = 4
offset = self.n_pixels * self.n_rasters * frame * float_size
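        # Worked example of the offset: for a 128x128 frame, frame=2 starts
        # at byte 128 * 128 * 2 * 4 = 131072.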
with open(self.filename, 'rb') as input_file:
input_file.seek(offset)
# CRUFT: With numpy 1.17, could use np.fromfile(self.filename, dtype=dtype, offset=offset).
data = np.float64(np.fromfile(input_file, dtype=dtype))
return data
def __str__(self):
"""
        Return the object's parameters as a string.
:return: string description of object parameters.
"""
desc = "Filename: " + self.filename + "\n"
desc += "n_frames: " + str(self.n_frames) + "\n"
desc += "n_pixels: " + str(self.n_pixels) + "\n"
desc += "n_rasters: " + str(self.n_rasters) + "\n"
desc += "swap_bytes: " + str(self.swap_bytes) + "\n"
return desc
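# A minimal usage sketch (the header path below is hypothetical, not part
# of this module):
#
#     loader = BSLLoader('/data/EXP000.bsl')
#     frames = loader.load_frames(range(loader.n_frames))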
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/file_converter/bsl_loader.py ----
from sas.sascalc.dataloader.data_info import Data2D
from sas.sascalc.file_converter.nxcansas_writer import NXcanSASWriter
import numpy as np
# ISIS 2D ASCII File Format
# http://www.isis.stfc.ac.uk/instruments/loq/software/colette-ascii-file-format-descriptions9808.pdf
# line: property
# 0: File header
# 1: q_x axis label and units
# 2: q_y axis label and units
# 3: Intensity axis label and units
# 4: n_use_rec - number of lines of user content following this line
# 5 to (5+n_use_rec): user content
# Number of qx points
# List of qx points
# Number of qy points
# List of qy points
# numqx numqy scale
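# A toy file matching the layout above (2x2 grid, iflag=3), assumed here
# purely for orientation:
#
#     * header
#     Qx (1/A)
#     Qy (1/A)
#     I (1/cm)
#     0
#     2
#     0.01 0.02
#     2
#     0.03 0.04
#     2 2 1.0
#     3
#     1.0 2.0 3.0 4.0
#     0.1 0.2 0.3 0.4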
class ASCII2DLoader(object):
def __init__(self, data_path):
"""
:param data_path: The path to the file to load
"""
self.data_path = data_path
def load(self):
"""
Load the data from the file into a Data2D object
:return: A Data2D instance containing data from the file
:raises ValueError: Raises a ValueError if the file is incorrectly
formatted
"""
with open(self.data_path, 'r') as file_handle:
file_buffer = file_handle.read()
all_lines = file_buffer.splitlines()
# Load num_points line-by-line from lines into a numpy array,
# starting on line number start_line
def _load_points(lines, start_line, num_points):
qs = np.zeros(num_points)
n = start_line
filled = 0
while filled < num_points:
row = np.fromstring(lines[n], dtype=np.float32, sep=' ')
qs[filled:filled+len(row)] = row
filled += len(row)
n += 1
return n, qs
current_line = 4
try:
# Skip n_use_rec lines
n_use_rec = int(all_lines[current_line].strip()[0])
current_line += n_use_rec + 1
# Read qx data
num_qs = int(all_lines[current_line].strip())
current_line += 1
current_line, qx = _load_points(all_lines, current_line, num_qs)
# Read qy data
num_qs = int(all_lines[current_line].strip())
current_line += 1
current_line, qy = _load_points(all_lines, current_line, num_qs)
except ValueError as e:
err_msg = "File incorrectly formatted.\n"
if str(e).find('broadcast') != -1:
err_msg += "Incorrect number of q data points provided. "
err_msg += "Expected {}.".format(num_qs)
elif str(e).find('invalid literal') != -1:
err_msg += ("Expected integer on line {}. "
"Instead got '{}'").format(current_line + 1,
all_lines[current_line])
else:
err_msg += str(e)
raise ValueError(err_msg)
# dimensions: [width, height, scale]
try:
dimensions = np.fromstring(all_lines[current_line],
dtype=np.float32, sep=' ')
if len(dimensions) != 3: raise ValueError()
width = int(dimensions[0])
height = int(dimensions[1])
except ValueError as e:
err_msg = "File incorrectly formatted.\n"
err_msg += ("Expected line {} to be of the form: <num_qx> "
"<num_qy> <scale>.").format(current_line + 1)
err_msg += " Instead got '{}'.".format(all_lines[current_line])
raise ValueError(err_msg)
if width > len(qx) or height > len(qy):
err_msg = "File incorrectly formatted.\n"
err_msg += ("Line {} says to use {}x{} points. "
"Only {}x{} provided.").format(current_line + 1, width,
height, len(qx), len(qy))
raise ValueError(err_msg)
# More qx and/or qy points can be provided than are actually used
qx = qx[:width]
qy = qy[:height]
current_line += 1
# iflag = 1 => Only intensity data (not dealt with here)
# iflag = 2 => q axis and intensity data
# iflag = 3 => q axis, intensity and error data
try:
iflag = int(all_lines[current_line].strip()[0])
if iflag <= 0 or iflag > 3: raise ValueError()
        except (ValueError, IndexError):
err_msg = "File incorrectly formatted.\n"
iflag = all_lines[current_line].strip()[0]
err_msg += ("Expected iflag on line {} to be 1, 2 or 3. "
"Instead got '{}'.").format(current_line+1, iflag)
raise ValueError(err_msg)
current_line += 1
try:
current_line, I = _load_points(all_lines, current_line,
width * height)
dI = np.zeros(width*height)
# Load error data if it's provided
if iflag == 3:
_, dI = _load_points(all_lines, current_line, width*height)
except Exception as e:
err_msg = "File incorrectly formatted.\n"
if str(e).find("list index") != -1:
err_msg += ("Incorrect number of data points. Expected {}"
" intensity").format(width * height)
if iflag == 3:
err_msg += " and error"
err_msg += " points."
else:
err_msg += str(e)
raise ValueError(err_msg)
# Format data for use with Data2D
qx = list(qx) * height
qy = np.array([[y] * width for y in qy]).flatten()
data = Data2D(qx_data=qx, qy_data=qy, data=I, err_data=dI)
return data
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/file_converter/ascii2d_loader.py ----
import os
import struct
try:
    # Python 2 compatibility: izip is the lazy zip. On Python 3 the import
    # fails and the builtin zip, which is already lazy, is used instead.
    from itertools import izip as zip
except ImportError:
    pass
import numpy as np
class CStyleStruct:
"""A nice and easy way to get "C-style struct" functionality."""
def __init__(self, **kwds):
self.__dict__.update(kwds)
class OTOKOParsingError(Exception):
pass
class OTOKOData:
def __init__(self, q_axis, data_axis):
self.q_axis = q_axis
self.data_axis = data_axis
class OTOKOLoader(object):
def __init__(self, qaxis_path, data_path):
self.qaxis_path = qaxis_path
self.data_path = data_path
def load_otoko_data(self):
"""
Loads "OTOKO" data, which is a format that stores each axis separately.
An axis is represented by a "header" file, which in turn will give details
of one or more binary files where the actual data is stored.
Given the paths of two header files, this function will load each axis in
turn. If loading is successful then an instance of the OTOKOData class
will be returned, else an exception will be raised.
For more information on the OTOKO file format, please see:
http://www.diamond.ac.uk/Home/Beamlines/small-angle/SAXS-Software/CCP13/
XOTOKO.html
"""
q_axis = self._load_otoko_axis(self.qaxis_path)
data_axis = self._load_otoko_axis(self.data_path)
return OTOKOData(q_axis, data_axis)
def _load_otoko_axis(self, header_path):
"""
Loads an "OTOKO" axis, given the header file path. Essentially, the
header file contains information about the data in the form of integer
"indicators", as well as the names of each of the binary files which are
assumed to be in the same directory as the header.
"""
if not os.path.exists(header_path):
raise OTOKOParsingError("The header file %s does not exist." % header_path)
binary_file_info_list = []
total_frames = 0
header_dir = os.path.dirname(os.path.abspath(header_path))
with open(header_path, "r") as header_file:
lines = header_file.readlines()
if len(lines) < 4:
raise OTOKOParsingError("Expected more lines in %s." % header_path)
info = lines[0] + lines[1]
def pairwise(iterable):
"""
s -> (s0,s1), (s2,s3), (s4, s5), ...
From http://stackoverflow.com/a/5389547/778572
"""
a = iter(iterable)
return zip(a, a)
for indicators, filename in pairwise(lines[2:]):
indicators = indicators.split()
if len(indicators) != 10:
raise OTOKOParsingError(
"Expected 10 integer indicators on line 3 of %s." \
% header_path)
if not all([i.isdigit() for i in indicators]):
raise OTOKOParsingError(
"Expected all indicators on line 3 of %s to be integers." \
% header_path)
binary_file_info = CStyleStruct(
# The indicators at indices 4 to 8 are always zero since they
# have been reserved for future use by the format. Also, the
# "last_file" indicator seems to be there for legacy reasons,
# as it doesn't appear to be something we have to bother
# enforcing correct use of; we just define the last file as
# being the last file in the list.
file_path = os.path.join(header_dir, filename.strip()),
n_channels = int(indicators[0]),
n_frames = int(indicators[1]),
dimensions = int(indicators[2]),
swap_bytes = int(indicators[3]) == 0,
last_file = int(indicators[9]) == 0 # We don't use this.
)
if binary_file_info.dimensions != 1:
msg = "File {} has {} dimensions, expected 1. Is it a BSL file?"
raise OTOKOParsingError(msg.format(filename.strip(),
binary_file_info.dimensions))
binary_file_info_list.append(binary_file_info)
total_frames += binary_file_info.n_frames
# Check that all binary files are listed in the header as having the same
# number of channels, since I don't think CorFunc can handle ragged data.
all_n_channels = [info.n_channels for info in binary_file_info_list]
if not all(all_n_channels[0] == c for c in all_n_channels):
raise OTOKOParsingError(
"Expected all binary files listed in %s to have the same number of channels." % header_path)
data = np.zeros(shape=(total_frames, all_n_channels[0]))
frames_so_far = 0
for info in binary_file_info_list:
if not os.path.exists(info.file_path):
raise OTOKOParsingError(
"The data file %s does not exist." % info.file_path)
with open(info.file_path, "rb") as binary_file:
# Ideally we'd like to use numpy's fromfile() to read in binary
# data, but we are forced to roll our own float-by-float file
# reader because of the rules imposed on us by the file format;
# namely, if the swap indicator flag has been raised then the bytes
# of each float occur in reverse order.
for frame in range(info.n_frames):
for channel in range(info.n_channels):
b = bytes(binary_file.read(4))
if info.swap_bytes:
b = b[::-1] # "Extended slice" syntax, used to reverse.
value = struct.unpack('f', b)[0]
data[frames_so_far + frame][channel] = value
frames_so_far += info.n_frames
return CStyleStruct(
header_path = header_path,
data = data,
binary_file_info_list = binary_file_info_list,
header_info = info
)
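# A minimal usage sketch (the header paths below are hypothetical; the
# binary files named inside each header must sit in the same directory):
#
#     loader = OTOKOLoader('/data/run1.qax', '/data/run1.int')
#     otoko = loader.load_otoko_data()
#     q_frames = otoko.q_axis.data        # shape (total_frames, n_channels)
#     i_frames = otoko.data_axis.data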
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/file_converter/otoko_loader.py ----
from sas.sascalc.dataloader.readers.cansas_reader import Reader as CansasReader
from sas.sascalc.dataloader.data_info import Data1D
import inspect
class CansasWriter(CansasReader):
def write(self, filename, frame_data, sasentry_attrs=None):
"""
Write the content of a Data1D as a CanSAS XML file
:param filename: name of the file to write
:param datainfo: Data1D object
"""
# Create XML document
doc, _ = self._to_xml_doc(frame_data, sasentry_attrs)
if self.encoding is None:
self.encoding = "UTF-8"
# Write the file
with open(filename, 'wb') as file_ref:
doc.write(file_ref, encoding=self.encoding,
pretty_print=True, xml_declaration=True)
def _to_xml_doc(self, frame_data, sasentry_attrs=None):
"""
Create an XML document to contain the content of an array of Data1Ds
:param frame_data: An array of Data1D objects
"""
valid_class = all([issubclass(data.__class__, Data1D) for data in frame_data])
if not valid_class:
raise RuntimeError("The cansas writer expects an array of "
"Data1D instances")
# Get PIs and create root element
pi_string = self._get_pi_string()
# Define namespaces and create SASroot object
main_node = self._create_main_node()
# Create ElementTree, append SASroot and apply processing instructions
base_string = pi_string + self.to_string(main_node)
base_element = self.create_element_from_string(base_string)
doc = self.create_tree(base_element)
# Create SASentry Element
entry_node = self.create_element("SASentry", sasentry_attrs)
root = doc.getroot()
root.append(entry_node)
# Use the first element in the array for writing metadata
datainfo = frame_data[0]
# Add Title to SASentry
self.write_node(entry_node, "Title", datainfo.title)
# Add Run to SASentry
self._write_run_names(datainfo, entry_node)
# Add Data info to SASEntry
for data_info in frame_data:
self._write_data(data_info, entry_node)
# Transmission Spectrum Info
self._write_trans_spectrum(datainfo, entry_node)
# Sample info
self._write_sample_info(datainfo, entry_node)
# Instrument info
instr = self._write_instrument(datainfo, entry_node)
# Source
self._write_source(datainfo, instr)
# Collimation
self._write_collimation(datainfo, instr)
# Detectors
self._write_detectors(datainfo, instr)
# Processes info
self._write_process_notes(datainfo, entry_node)
# Note info
self._write_notes(datainfo, entry_node)
# Return the document, and the SASentry node associated with
# the data we just wrote
return doc, entry_node
def _write_data(self, datainfo, entry_node):
"""
Writes the I and Q data to the XML file
:param datainfo: The Data1D object the information is coming from
:param entry_node: lxml node ElementTree object to be appended to
"""
node = self.create_element("SASdata")
self.append(node, entry_node)
for i in range(len(datainfo.x)):
point = self.create_element("Idata")
node.append(point)
self.write_node(point, "Q", datainfo.x[i],
{'unit': datainfo.x_unit})
            if len(datainfo.y) > i:
self.write_node(point, "I", datainfo.y[i],
{'unit': datainfo.y_unit})
if datainfo.dy is not None and len(datainfo.dy) > i:
self.write_node(point, "Idev", datainfo.dy[i],
{'unit': datainfo.y_unit})
if datainfo.dx is not None and len(datainfo.dx) > i:
self.write_node(point, "Qdev", datainfo.dx[i],
{'unit': datainfo.x_unit})
if datainfo.dxw is not None and len(datainfo.dxw) > i:
self.write_node(point, "dQw", datainfo.dxw[i],
{'unit': datainfo.x_unit})
if datainfo.dxl is not None and len(datainfo.dxl) > i:
self.write_node(point, "dQl", datainfo.dxl[i],
{'unit': datainfo.x_unit})
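# A minimal usage sketch (frame0 and frame1 stand in for hypothetical
# Data1D objects):
#
#     writer = CansasWriter()
#     writer.write('frames.xml', [frame0, frame1])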
# ---- end of /sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/file_converter/cansas_writer.py ----
import h5py
import numpy as np
from sas.sascalc.dataloader.data_info import Data1D, Data2D
PROBES = ['neutron', 'x-ray', 'muon', 'electron', 'ultraviolet',
'visible light', 'positron', 'proton']
TYPES = ['Spallation Neutron Source', 'UV Plasma Source', 'Free-Electron Laser',
'Reactor Neutron Source', 'Synchrotron X-ray Source', 'UV Laser',
'Pulsed Muon Source', 'Pulsed Reactor Neutron Source', 'Ion Source',
'Rotating Anode X-ray', 'Optical Laser', 'Fixed Tube X-ray']
class NXcanSASWriter:
"""
A class for writing in NXcanSAS data files. Any number of data sets may be
written to the file. Currently 1D and 2D SAS data sets are supported
NXcanSAS spec: http://download.nexusformat.org/sphinx/classes/contributed_definitions/NXcanSAS.html
:Dependencies:
        The NXcanSAS writer requires h5py v2.5.0 or later.
"""
def write(self, dataset, filename):
"""
        Write an array of Data1D or Data2D objects to an NXcanSAS file, as
        one SASentry with multiple SASdata elements. The metadata of the first
        element in the array will be written as the SASentry metadata
(detector, instrument, sample, etc).
:param dataset: A list of Data1D or Data2D objects to write
:param filename: Where to write the NXcanSAS file
"""
def _h5_string(string):
"""
Convert a string to a numpy string in a numpy array. This way it is
written to the HDF5 file as a fixed length ASCII string and is
compatible with the Reader read() method.
"""
if isinstance(string, np.ndarray):
return string
elif not isinstance(string, str):
string = str(string)
return np.array([np.string_(string)])
def _write_h5_string(entry, value, key):
entry[key] = _h5_string(value)
def _h5_float(x):
if not (isinstance(x, list)):
x = [x]
return np.array(x, dtype=np.float32)
def _write_h5_float(entry, value, key):
entry.create_dataset(key, data=_h5_float(value))
def _write_h5_vector(entry, vector, names=['x_position', 'y_position'],
units=None, write_fn=_write_h5_string):
"""
Write a vector to an h5 entry
:param entry: The H5Py entry to write to
:param vector: The Vector to write
:param names: What to call the x,y and z components of the vector
when writing to the H5Py entry
:param units: The units of the vector (optional)
:param write_fn: A function to convert the value to the required
format and write it to the H5Py entry, of the form
f(entry, value, name) (optional)
"""
if len(names) < 2:
raise ValueError("Length of names must be >= 2.")
if vector.x is not None:
write_fn(entry, vector.x, names[0])
if units is not None:
entry[names[0]].attrs['units'] = units
if vector.y is not None:
write_fn(entry, vector.y, names[1])
if units is not None:
entry[names[1]].attrs['units'] = units
if len(names) == 3 and vector.z is not None:
write_fn(entry, vector.z, names[2])
if units is not None:
entry[names[2]].attrs['units'] = units
valid_data = all([isinstance(d, (Data1D, Data2D)) for d in dataset])
if not valid_data:
raise ValueError("All entries of dataset must be Data1D or Data2D"
"objects")
# Get run name and number from first Data object
data_info = dataset[0]
run_number = ''
run_name = ''
if len(data_info.run) > 0:
run_number = data_info.run[0]
if len(data_info.run_name) > 0:
run_name = data_info.run_name[run_number]
f = h5py.File(filename, 'w')
sasentry = f.create_group('sasentry01')
sasentry['definition'] = _h5_string('NXcanSAS')
sasentry['run'] = _h5_string(run_number)
sasentry['run'].attrs['name'] = run_name
sasentry['title'] = _h5_string(data_info.title)
sasentry.attrs['canSAS_class'] = 'SASentry'
sasentry.attrs['version'] = '1.1'
for i, data_obj in enumerate(dataset):
data_entry = sasentry.create_group("sasdata{0:0=2d}".format(i+1))
data_entry.attrs['canSAS_class'] = 'SASdata'
if isinstance(data_obj, Data1D):
self._write_1d_data(data_obj, data_entry)
elif isinstance(data_obj, Data2D):
self._write_2d_data(data_obj, data_entry)
data_info = dataset[0]
# Sample metadata
sample_entry = sasentry.create_group('sassample')
sample_entry.attrs['canSAS_class'] = 'SASsample'
sample_entry['ID'] = _h5_string(data_info.sample.name)
sample_attrs = ['thickness', 'temperature', 'transmission']
for key in sample_attrs:
if getattr(data_info.sample, key) is not None:
sample_entry.create_dataset(key,
data=_h5_float(getattr(data_info.sample, key)))
_write_h5_vector(sample_entry, data_info.sample.position)
# NXcanSAS doesn't save information about pitch, only roll
# and yaw. The _write_h5_vector method writes vector.y, but we
# need to write vector.z for yaw
data_info.sample.orientation.y = data_info.sample.orientation.z
_write_h5_vector(sample_entry, data_info.sample.orientation,
names=['polar_angle', 'azimuthal_angle'])
if data_info.sample.details is not None\
and data_info.sample.details != []:
details = None
if len(data_info.sample.details) > 1:
details = [np.string_(d) for d in data_info.sample.details]
details = np.array(details)
elif data_info.sample.details != []:
details = _h5_string(data_info.sample.details[0])
if details is not None:
sample_entry.create_dataset('details', data=details)
# Instrument metadata
instrument_entry = sasentry.create_group('sasinstrument')
instrument_entry.attrs['canSAS_class'] = 'SASinstrument'
instrument_entry['name'] = _h5_string(data_info.instrument)
# Source metadata
source_entry = instrument_entry.create_group('sassource')
source_entry.attrs['canSAS_class'] = 'SASsource'
if data_info.source.radiation is not None:
if data_info.source.radiation in PROBES:
source_entry['probe'] = _h5_string(data_info.source.radiation)
elif data_info.source.radiation in TYPES:
source_entry['type'] = _h5_string(data_info.source.radiation)
else:
# Should not get here, but we shouldn't throw info out either
source_entry['notes'] = _h5_string(data_info.source.radiation)
if data_info.source.type is not None:
source_entry['type'] = _h5_string(data_info.source.type)
if data_info.source.probe is not None:
source_entry['probe'] = _h5_string(data_info.source.probe)
if data_info.source.beam_shape is not None:
source_entry['beam_shape'] = _h5_string(data_info.source.beam_shape)
wavelength_keys = { 'wavelength': 'incident_wavelength',
'wavelength_min':'wavelength_min',
'wavelength_max': 'wavelength_max',
'wavelength_spread': 'incident_wavelength_spread' }
for sasname, nxname in wavelength_keys.items():
value = getattr(data_info.source, sasname)
units = getattr(data_info.source, sasname + '_unit')
if value is not None:
source_entry[nxname] = _h5_float(value)
source_entry[nxname].attrs['units'] = units
_write_h5_vector(source_entry, data_info.source.beam_size,
names=['beam_size_x', 'beam_size_y'],
units=data_info.source.beam_size_unit, write_fn=_write_h5_float)
# Collimation metadata
if len(data_info.collimation) > 0:
for i, coll_info in enumerate(data_info.collimation):
collimation_entry = instrument_entry.create_group(
'sascollimation{0:0=2d}'.format(i + 1))
collimation_entry.attrs['canSAS_class'] = 'SAScollimation'
if coll_info.length is not None:
_write_h5_float(collimation_entry, coll_info.length, 'SDD')
collimation_entry['SDD'].attrs['units'] =\
coll_info.length_unit
if coll_info.name is not None:
collimation_entry['name'] = _h5_string(coll_info.name)
else:
# Create a blank one - at least 1 collimation required by format
instrument_entry.create_group('sascollimation01')
# Detector metadata
if len(data_info.detector) > 0:
for i, det_info in enumerate(data_info.detector):
detector_entry = instrument_entry.create_group(
'sasdetector{0:0=2d}'.format(i + 1))
detector_entry.attrs['canSAS_class'] = 'SASdetector'
if det_info.distance is not None:
_write_h5_float(detector_entry, det_info.distance, 'SDD')
detector_entry['SDD'].attrs['units'] =\
det_info.distance_unit
if det_info.name is not None:
detector_entry['name'] = _h5_string(det_info.name)
else:
detector_entry['name'] = _h5_string('')
if det_info.slit_length is not None:
_write_h5_float(detector_entry, det_info.slit_length,
'slit_length')
detector_entry['slit_length'].attrs['units'] =\
det_info.slit_length_unit
_write_h5_vector(detector_entry, det_info.offset)
# NXcanSAS doesn't save information about pitch, only roll
# and yaw. The _write_h5_vector method writes vector.y, but we
# need to write vector.z for yaw
det_info.orientation.y = det_info.orientation.z
_write_h5_vector(detector_entry, det_info.orientation,
names=['polar_angle', 'azimuthal_angle'])
_write_h5_vector(detector_entry, det_info.beam_center,
names=['beam_center_x', 'beam_center_y'],
write_fn=_write_h5_float, units=det_info.beam_center_unit)
_write_h5_vector(detector_entry, det_info.pixel_size,
names=['x_pixel_size', 'y_pixel_size'],
write_fn=_write_h5_float, units=det_info.pixel_size_unit)
else:
# Create a blank one - at least 1 detector required by format
detector_entry = instrument_entry.create_group('sasdetector01')
detector_entry.attrs['canSAS_class'] = 'SASdetector'
detector_entry.attrs['name'] = ''
# Process meta data
for i, process in enumerate(data_info.process):
process_entry = sasentry.create_group('sasprocess{0:0=2d}'.format(
i + 1))
process_entry.attrs['canSAS_class'] = 'SASprocess'
if process.name:
name = _h5_string(process.name)
process_entry.create_dataset('name', data=name)
if process.date:
date = _h5_string(process.date)
process_entry.create_dataset('date', data=date)
if process.description:
desc = _h5_string(process.description)
process_entry.create_dataset('description', data=desc)
for j, term in enumerate(process.term):
# Don't save empty terms
if term:
h5_term = _h5_string(term)
process_entry.create_dataset('term{0:0=2d}'.format(
j + 1), data=h5_term)
for j, note in enumerate(process.notes):
# Don't save empty notes
if note:
h5_note = _h5_string(note)
process_entry.create_dataset('note{0:0=2d}'.format(
j + 1), data=h5_note)
# Transmission Spectrum
for i, trans in enumerate(data_info.trans_spectrum):
trans_entry = sasentry.create_group(
'sastransmission_spectrum{0:0=2d}'.format(i + 1))
trans_entry.attrs['canSAS_class'] = 'SAStransmission_spectrum'
trans_entry.attrs['signal'] = 'T'
trans_entry.attrs['T_axes'] = 'T'
trans_entry.attrs['name'] = trans.name
# Timestamp can be a string or a date-like object. Only write if set
if trans.timestamp:
trans_entry.attrs['timestamp'] = trans.timestamp
transmission = trans_entry.create_dataset('T',
data=trans.transmission)
            transmission.attrs['uncertainties'] = 'Tdev'
trans_entry.create_dataset('Tdev',
data=trans.transmission_deviation)
trans_entry.create_dataset('lambda', data=trans.wavelength)
        if np.isscalar(data_info.notes) and data_info.notes:
            # Handle scalars that aren't empty arrays, None, '', etc.
            notes = [np.string_(data_info.notes)]
        elif len(data_info.notes) > 0:
            # Handle non-empty iterables, including single-note lists
            notes = [np.string_(n) for n in data_info.notes]
        else:
            # Notes are required in NXcanSAS format, so write a blank one
            notes = [np.string_('')]
        for i, note in enumerate(notes):
            note_entry = sasentry.create_group('sasnote{0}'.format(i))
            note_entry.attrs['canSAS_class'] = 'SASnote'
            note_entry.create_dataset('SASnote', data=note)
f.close()
def _write_1d_data(self, data_obj, data_entry):
"""
Writes the contents of a Data1D object to a SASdata h5py Group
:param data_obj: A Data1D object to write to the file
:param data_entry: A h5py Group object representing the SASdata
"""
data_entry.attrs['signal'] = 'I'
data_entry.attrs['I_axes'] = 'Q'
data_entry.attrs['Q_indices'] = [0]
q_entry = data_entry.create_dataset('Q', data=data_obj.x)
q_entry.attrs['units'] = data_obj.x_unit
i_entry = data_entry.create_dataset('I', data=data_obj.y)
i_entry.attrs['units'] = data_obj.y_unit
if data_obj.dy is not None:
i_entry.attrs['uncertainties'] = 'Idev'
i_dev_entry = data_entry.create_dataset('Idev', data=data_obj.dy)
i_dev_entry.attrs['units'] = data_obj.y_unit
if data_obj.dx is not None:
q_entry.attrs['resolutions'] = 'dQ'
dq_entry = data_entry.create_dataset('dQ', data=data_obj.dx)
dq_entry.attrs['units'] = data_obj.x_unit
elif data_obj.dxl is not None:
q_entry.attrs['resolutions'] = ['dQl','dQw']
dql_entry = data_entry.create_dataset('dQl', data=data_obj.dxl)
dql_entry.attrs['units'] = data_obj.x_unit
dqw_entry = data_entry.create_dataset('dQw', data=data_obj.dxw)
dqw_entry.attrs['units'] = data_obj.x_unit
def _write_2d_data(self, data, data_entry):
"""
Writes the contents of a Data2D object to a SASdata h5py Group
:param data: A Data2D object to write to the file
:param data_entry: A h5py Group object representing the SASdata
"""
data_entry.attrs['signal'] = 'I'
data_entry.attrs['I_axes'] = 'Qx,Qy'
data_entry.attrs['Q_indices'] = [0,1]
(n_rows, n_cols) = (len(data.y_bins), len(data.x_bins))
if (n_rows == 0 and n_cols == 0) or (n_cols*n_rows != data.data.size):
# Calculate rows and columns, assuming detector is square
# Same logic as used in PlotPanel.py _get_bins
n_cols = int(np.floor(np.sqrt(len(data.qy_data))))
n_rows = int(np.floor(len(data.qy_data) / n_cols))
if n_rows * n_cols != len(data.qy_data):
raise ValueError("Unable to calculate dimensions of 2D data")
intensity = np.reshape(data.data, (n_rows, n_cols))
qx = np.reshape(data.qx_data, (n_rows, n_cols))
qy = np.reshape(data.qy_data, (n_rows, n_cols))
i_entry = data_entry.create_dataset('I', data=intensity)
i_entry.attrs['units'] = data.I_unit
qx_entry = data_entry.create_dataset('Qx', data=qx)
qx_entry.attrs['units'] = data.Q_unit
qy_entry = data_entry.create_dataset('Qy', data=qy)
qy_entry.attrs['units'] = data.Q_unit
if (data.err_data is not None
and not all(v is None for v in data.err_data)):
d_i = np.reshape(data.err_data, (n_rows, n_cols))
i_entry.attrs['uncertainties'] = 'Idev'
i_dev_entry = data_entry.create_dataset('Idev', data=d_i)
i_dev_entry.attrs['units'] = data.I_unit
if (data.dqx_data is not None
and not all(v is None for v in data.dqx_data)):
qx_entry.attrs['resolutions'] = 'dQx'
dqx_entry = data_entry.create_dataset('dQx', data=data.dqx_data)
dqx_entry.attrs['units'] = data.Q_unit
if (data.dqy_data is not None
and not all(v is None for v in data.dqy_data)):
qy_entry.attrs['resolutions'] = 'dQy'
dqy_entry = data_entry.create_dataset('dQy', data=data.dqy_data)
dqy_entry.attrs['units'] = data.Q_unit
if data.mask is not None and not all(v is None for v in data.mask):
data_entry.attrs['mask'] = "mask"
mask = np.invert(np.asarray(data.mask, dtype=bool))
data_entry.create_dataset('mask', data=mask)
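# Illustrative usage of this writer (a minimal sketch; `dataset` is assumed
# to be a SasView Data1D or Data2D object, and the output path is made up):
#     writer = NXcanSASWriter()
#     writer.write([dataset], 'output.h5')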
/sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/file_converter/nxcansas_writer.py
import os
import numpy as np
from sas.sascalc.dataloader.data_info import Data1D
from sas.sascalc.file_converter.nxcansas_writer import NXcanSASWriter
from sas.sascalc.file_converter.bsl_loader import BSLLoader
from sas.sascalc.file_converter.otoko_loader import OTOKOLoader
from sas.sascalc.file_converter.cansas_writer import CansasWriter
def extract_ascii_data(filename):
"""
Extracts data from a single-column ASCII file
:param filename: The file to load data from
:return: A numpy array containing the extracted data
"""
    try:
        data = np.loadtxt(filename, dtype=str)
    except Exception:
        # Check if the file is a BSL or OTOKO header file
        with open(filename, 'r') as f:
            f.readline()
            f.readline()
            bsl_metadata = f.readline().strip().split()
        if len(bsl_metadata) == 10:
            msg = ("Error parsing ASCII data. {} looks like a BSL or OTOKO "
                   "header file.")
            raise IOError(msg.format(os.path.split(filename)[-1]))
        # Not a BSL/OTOKO header, so re-raise the original parsing error
        raise
    if len(data.shape) != 1:
        msg = "Error reading {}: Only one column of data is allowed"
        raise IOError(msg.format(os.path.basename(filename)))
    is_float = True
    try:
        float(data[0])
    except ValueError:
        is_float = False
    if not is_float:
        end_char = data[0][-1]
        # If lines end with a comma or semi-colon, trim the last character
        if end_char in (',', ';'):
            data = [s[0:-1] for s in data]
        else:
            msg = ("Error reading {}: Lines must end with a digit, comma "
                   "or semi-colon").format(os.path.basename(filename))
            raise IOError(msg)
return np.array(data, dtype=np.float32)
def extract_otoko_data(qfile, ifile):
"""
Extracts data from a 1D OTOKO file
:param filename: The OTOKO file to load the data from
:return: A numpy array containing the extracted data
"""
loader = OTOKOLoader(qfile, ifile)
otoko_data = loader.load_otoko_data()
qdata = otoko_data.q_axis.data
iqdata = otoko_data.data_axis.data
if len(qdata) > 1:
msg = ("Q-Axis file has multiple frames. Only 1 frame is "
"allowed for the Q-Axis")
raise IOError(msg)
qdata = qdata[0]
return qdata, iqdata
def convert_2d_data(dataset, output, metadata):
"""
    Wrapper for the NXcanSAS writer call.
Sets external metadata on the dataset first.
"""
for key, value in metadata.items():
setattr(dataset[0], key, value)
w = NXcanSASWriter()
w.write(dataset, output)
def convert_to_cansas(frame_data, filepath, run_name, single_file):
"""
Saves an array of Data1D objects to a single CanSAS file with multiple
<SasData> elements, or to multiple CanSAS files, each with one
<SasData> element.
:param frame_data: If single_file is true, an array of Data1D
objects. If single_file is false, a dictionary of the
form *{frame_number: Data1D}*.
:param filepath: Where to save the CanSAS file
:param single_file: If true, array is saved as a single file,
if false, each item in the array is saved to it's own file
"""
writer = CansasWriter()
entry_attrs = None
if run_name != '':
entry_attrs = {'name': run_name}
if single_file:
writer.write(filepath, frame_data,
sasentry_attrs=entry_attrs)
else:
# Folder and base filename
[group_path, group_name] = os.path.split(filepath)
ext = "." + group_name.split('.')[-1] # File extension
for frame_number, frame_d in frame_data.items():
# Append frame number to base filename
filename = group_name.replace(ext, str(frame_number)+ext)
destination = os.path.join(group_path, filename)
writer.write(destination, [frame_d],
sasentry_attrs=entry_attrs)
def toFloat(text):
"""
Dumb string->float converter
"""
value = None
try:
value = float(text) if text != "" else None
except ValueError:
pass
return value
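# Illustrative multi-file conversion (a minimal sketch; the frame objects
# and paths are hypothetical):
#     frames = {0: frame0_data1d, 1: frame1_data1d}
#     convert_to_cansas(frames, 'run.xml', run_name='run42', single_file=False)
#     # -> writes run0.xml and run1.xml alongside each other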
/sasview-5.0.6b3-py3-none-any.whl/sas/sascalc/file_converter/FileConverterUtilities.py
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import simple_history.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="ServiceAccount",
fields=[
(
"id",
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name="created"
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False, verbose_name="modified"
),
),
("created_by", models.EmailField(max_length=254)),
("service_account_data", models.JSONField()),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="HistoricalServiceAccount",
fields=[
(
"id",
models.BigIntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name="created"
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False, verbose_name="modified"
),
),
("created_by", models.EmailField(max_length=254)),
("service_account_data", models.JSONField()),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField(db_index=True)),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")], max_length=1
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "historical service account",
"verbose_name_plural": "historical service accounts",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": ("history_date", "history_id"),
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
/sat_automations-0.5.16rc2-py3-none-any.whl/sat_automations/manage_automations/migrations/0001_initial.py
from datetime import datetime, timedelta
from django.contrib import admin # noqa: F401
from sat_automations.housing_automation.models import (
AssignRevokeLog,
AssignRevokeTracker,
Clearance,
RoomUseCode,
)
# Custom ListFilters
class TrackerNullFilter(admin.SimpleListFilter):
title = "Status"
parameter_name = "Status"
def lookups(self, request, model_admin):
return ("null", "Not Processed"), ("not_null", "Processed")
def queryset(self, request, queryset):
if self.value() == "null":
return queryset.filter(status__isnull=True)
if self.value() == "not_null":
return queryset.filter(status__isnull=False)
class TrackerDateFilterBase(admin.SimpleListFilter):
delta_plus_three = (datetime.now() + timedelta(days=3)).date()
yesterday = (datetime.now() - timedelta(days=1)).date()
tomorrow = (datetime.now() + timedelta(days=1)).date()
now = datetime.now().date()
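    # Note: these reference dates are evaluated once, at class-definition
    # (import) time, so a long-running process will keep using stale values.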
def lookups(self, request, model_admin):
return (
("yesterday", "Yesterday"),
("today", "Today"),
("tomorrow", "Tomorrow"),
("next_three_days", "Within Next Three Days"),
)
class MoveInFilter(TrackerDateFilterBase):
title = "Move In Date"
parameter_name = "move_in_date"
def queryset(self, request, queryset):
if self.value() == "next_three_days":
return queryset.filter(
move_in_date__gte=self.now, move_in_date__lte=self.delta_plus_three
)
if self.value() == "yesterday":
return queryset.filter(move_in_date=self.yesterday)
if self.value() == "today":
return queryset.filter(move_in_date=self.now)
if self.value() == "tomorrow":
return queryset.filter(move_in_date=self.tomorrow)
class MoveOutFilter(TrackerDateFilterBase):
title = "Move Out Date"
parameter_name = "move_out_date"
def queryset(self, request, queryset):
if self.value() == "next_three_days":
return queryset.filter(
move_out_date__gte=self.now, move_out_date__lte=self.delta_plus_three
)
if self.value() == "yesterday":
return queryset.filter(move_out_date=self.yesterday)
if self.value() == "today":
return queryset.filter(move_out_date=self.now)
if self.value() == "tomorrow":
return queryset.filter(move_out_date=self.tomorrow)
class AssignRevokeLogAdmin(admin.ModelAdmin):
list_display = [
"building_code",
"campus_id",
"clearance_type",
"status",
"message",
"has_extra_info",
"created",
]
readonly_fields = [
"building_code",
"campus_id",
"clearance_type",
"status",
"message",
"extra_info",
"created",
]
search_fields = ["building_code", "campus_id"]
list_filter = ["clearance_type", "status", "message", "created"]
class Meta:
model = AssignRevokeLog
def has_module_permission(self, request, obj=None):
if not request.user.is_staff:
return False
return True
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_extra_info(self, obj):
return obj.extra_info is not None
has_extra_info.boolean = True
class AssignRevokeTrackerAdmin(admin.ModelAdmin):
list_display = [
"campus_id",
"building_code",
"move_in_date",
"move_out_date",
"status",
"created",
"modified",
]
readonly_fields = [
"campus_id",
"building_code",
"move_in_date",
"move_out_date",
"status",
"created",
"modified",
]
search_fields = ["building_code", "campus_id"]
list_filter = ["status", "created", MoveInFilter, MoveOutFilter, TrackerNullFilter]
class Meta:
model = AssignRevokeTracker
def has_module_permission(self, request, obj=None):
if not request.user.is_staff:
return False
return True
class RoomUseCodeAdmin(admin.ModelAdmin):
list_display = ["name", "clearance", "created", "modified"]
list_filter = ["clearance", "created", "modified"]
ordering = ["name"]
search_fields = ["name"]
class Meta:
model = RoomUseCode
class ClearanceAdmin(admin.ModelAdmin):
list_display = ["name", "created", "modified"]
list_filter = ["name", "created", "modified"]
ordering = ["name"]
search_fields = ["name"]
class Meta:
model = Clearance
# Register your models here.
admin.site.register(AssignRevokeLog, AssignRevokeLogAdmin)
admin.site.register(AssignRevokeTracker, AssignRevokeTrackerAdmin)
admin.site.register(Clearance, ClearanceAdmin)
admin.site.register(RoomUseCode, RoomUseCodeAdmin)
/sat_automations-0.5.16rc2-py3-none-any.whl/sat_automations/housing_automation/admin.py
from dateutil.parser import parse
from django.db import models # noqa: F401
from model_utils.models import TimeStampedModel
from simple_history.models import HistoricalRecords
class ClearanceType(models.TextChoices):
ASSIGN = "assign", "Assign"
REVOKE = "revoke", "Revoke"
class StatusType(models.TextChoices):
PASS = "pass", "Pass"
FAIL = "fail", "Fail"
MODIFIED = "modified", "Modified"
class LogMessageType(models.TextChoices):
BUILDING_CODE_NOT_FOUND = "BC_NF", "Building code not found"
BUILDING_CODE_NO_CLEARANCE = "BC_NC", "Building code has no clearance"
ASSIGNMENT_FAILED = "FC", "Failed Clearance Assignment Operation"
ASSIGNMENT_SUCCESS = "SC", "Successful Clearance Assignment Operation"
# Create your models here.
# Non-Django classes
class PeopleSoftProxyBase:
def __init__(self, access_token=None) -> None:
self.base_url = ""
self.access_token = access_token
def get_person(self, campus_id):
pass
def get_housing(self, action: ClearanceType):
pass
class AssignRevokeLog(TimeStampedModel):
"""A model to track the results of the HousingAssignRevoke automation."""
campus_id = models.IntegerField()
building_code = models.CharField(max_length=64)
clearance_type = models.CharField(
choices=ClearanceType.choices, max_length=max(len(x) for x in ClearanceType.values)
)
status = models.CharField(
choices=StatusType.choices, max_length=max(len(x) for x in StatusType.values)
)
message = models.CharField(
choices=LogMessageType.choices, max_length=max(len(x) for x in LogMessageType.values)
)
extra_info = models.TextField(blank=True, null=True)
class Meta:
verbose_name = "Assign/Revoke Log"
verbose_name_plural = "Assign/Revoke Logs"
indexes = [
models.Index(fields=["campus_id", "building_code"]),
]
def __str__(self):
return f"{self.campus_id}: {self.building_code} - {self.clearance_type} - {self.status}"
class AssignRevokeTracker(TimeStampedModel):
campus_id = models.IntegerField()
move_in_date = models.DateField()
move_out_date = models.DateField()
building_code = models.CharField(max_length=64)
status = models.CharField(
choices=StatusType.choices, max_length=max(len(x) for x in StatusType.values), null=True
)
class Meta:
verbose_name = "Assign/Revoke Tracker"
verbose_name_plural = "Assign/Revoke Tracker"
indexes = [
models.Index(fields=["campus_id", "building_code"]),
]
def __str__(self):
return f"{self.campus_id}: {self.status}"
def match_peoplesoft_list(self, ps_list) -> bool:
match = (
str(self.campus_id),
self.building_code,
str(self.move_in_date),
str(self.move_out_date),
)
for ps in ps_list:
against = (
ps.get("campus_id"),
ps.get("room_second_description"),
f'{parse(ps.get("move_in_date")).date()}',
f'{parse(ps.get("move_out_date")).date()}',
)
if match == against:
return True
return False
class Clearance(TimeStampedModel):
name = models.CharField(max_length=128)
object_id = models.IntegerField()
history = HistoricalRecords()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "Clearance"
verbose_name_plural = "Clearances"
unique_together = ("object_id", "name")
indexes = [
models.Index(fields=["object_id", "name"]),
]
class RoomUseCode(TimeStampedModel):
name = models.CharField(max_length=128)
clearance = models.ForeignKey(Clearance, on_delete=models.PROTECT, related_name="building_code")
def __str__(self):
return self.name
class Meta:
verbose_name = "Room Use Code"
verbose_name_plural = "Room Use Codes"
/sat_automations-0.5.16rc2-py3-none-any.whl/sat_automations/housing_automation/models.py
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="AssignRevokeLog",
fields=[
(
"id",
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name="created"
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False, verbose_name="modified"
),
),
("campus_id", models.IntegerField()),
("building_code", models.CharField(max_length=64)),
(
"clearance_type",
models.CharField(
choices=[("assign", "Assign"), ("revoke", "Revoke")], max_length=6
),
),
(
"status",
models.CharField(choices=[("pass", "Pass"), ("fail", "Fail")], max_length=4),
),
(
"message",
models.CharField(
choices=[
("BC_NF", "Building code not found"),
("BC_NC", "Building code has no clearance"),
("FC", "Failed Clearance Assignment Operation"),
("SC", "Successful Clearance Assignment Operation"),
],
max_length=5,
),
),
("extra_info", models.TextField(blank=True, null=True)),
],
options={
"verbose_name": "Assign/Revoke Log",
"verbose_name_plural": "Assign/Revoke Logs",
"indexes": [
models.Index(
fields=["campus_id", "building_code"], name="housing_aut_campus__04fdb0_idx"
)
],
},
),
]
/sat_automations-0.5.16rc2-py3-none-any.whl/sat_automations/housing_automation/migrations/0001_initial.py
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import simple_history.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("housing_automation", "0004_alter_assignrevoketracker_options"),
]
operations = [
migrations.CreateModel(
name="Clearance",
fields=[
(
"id",
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(max_length=128)),
("object_id", models.IntegerField()),
("created", models.DateTimeField(auto_now_add=True)),
("modified", models.DateTimeField(auto_now=True)),
],
options={
"verbose_name": "Clearance",
"verbose_name_plural": "Clearances",
},
),
migrations.CreateModel(
name="RoomUseCode",
fields=[
(
"id",
models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name="created"
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False, verbose_name="modified"
),
),
("name", models.CharField(max_length=128)),
(
"clearance",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="building_code",
to="housing_automation.clearance",
),
),
],
options={
"verbose_name": "Room Use Code",
"verbose_name_plural": "Room Use Codes",
},
),
migrations.CreateModel(
name="HistoricalClearance",
fields=[
(
"id",
models.BigIntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
("name", models.CharField(max_length=128)),
("object_id", models.IntegerField()),
("created", models.DateTimeField(blank=True, editable=False)),
("modified", models.DateTimeField(blank=True, editable=False)),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField(db_index=True)),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")], max_length=1
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "historical Clearance",
"verbose_name_plural": "historical Clearances",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": ("history_date", "history_id"),
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.AddIndex(
model_name="clearance",
index=models.Index(fields=["object_id", "name"], name="housing_aut_object__61b21b_idx"),
),
migrations.AlterUniqueTogether(
name="clearance",
unique_together={("object_id", "name")},
),
]
/sat_automations-0.5.16rc2-py3-none-any.whl/sat_automations/housing_automation/migrations/0005_clearance_roomusecode_historicalclearance_and_more.py
from datetime import datetime, timedelta
from typing import Union
class SatDatetime:
@staticmethod
def get_from_datetime(date):
if date >= datetime(2022, 10, 10): # >=5000
delta = date - datetime(2009, 1, 31)
convert = delta.days + (delta.seconds + delta.microseconds / 1000000) / 86400
else: # <5000
delta = date - datetime(2017, 12, 25)
convert = (delta.days + (delta.seconds + delta.microseconds / 1000000) / 86400) / 7 * 20
return SatDatetime(convert)
def __init__(self,
year: float, month: float = 1, day: float = 1,
hour: float = 0, minute: float = 0, second: float = 0):
minute += second / 60
hour += minute / 60
day += -1 + hour / 24
month += -1 + day / 22
year += month / 8
self.year = 1
self.month = 1
self.day = 1
self.hour = 0
self.minute = 0
self.second = 0.0
self.refresh_by_year(year)
def __copy__(self):
return SatDatetime(self.year, self.month, self.day, self.hour, self.minute, self.second)
def __str__(self):
""" A user-friendly string representation of SatTimedelta. """
return f'{self.year}. {self.month}. {self.day:02d}. {self.hour:02d}:{self.minute:02d}:{self.second:09.6f}'
def __repr__(self):
""" A programmer-friendly string representation of SatTimedelta. """
return f'SatDatetime({self.year}, {self.month}, {self.day}, {self.hour}, {self.minute}, {self.second})'
def __add__(self, other: 'SatTimedelta') -> 'SatDatetime':
""" Adds SatTimedelta object from self and returns new SatTimedelta object. """
if isinstance(other, SatTimedelta):
return SatDatetime(self.year + other.years, self.month + other.months, self.day + other.days,
self.hour + other.hours, self.minute + other.minutes, self.second + other.seconds)
else:
raise TypeError(f'SatDatetime can only be added to SatTimedelta, not {type(other)}')
def __sub__(self, other: 'SatTimedelta') -> 'SatDatetime':
""" Subtracts SatTimedelta object from self and returns new SatTimedelta object. """
if isinstance(other, SatTimedelta):
return SatDatetime(self.year - other.years, self.month - other.months, self.day - other.days,
self.hour - other.hours, self.minute - other.minutes, self.second - other.seconds)
else:
            raise TypeError(f'Only a SatTimedelta can be subtracted from SatDatetime, not {type(other)}')
def __mul__(self, other: float) -> 'SatDatetime':
""" Multiplies SatTimedelta object by float and returns new SatTimedelta object. """
return SatDatetime(self.year * other, self.month * other, self.day * other,
self.hour * other, self.minute * other, self.second * other)
def __truediv__(self, other: float) -> 'SatDatetime':
""" Divides SatTimedelta object by float and returns new SatTimedelta object. """
return SatDatetime(self.year / other, self.month / other, self.day / other,
self.hour / other, self.minute / other, self.second / other)
def __lt__(self, other: 'SatDatetime') -> bool:
return self.get_on_year() < other.get_on_year()
def __le__(self, other: 'SatDatetime') -> bool:
return self.get_on_year() <= other.get_on_year()
def __eq__(self, other: 'SatDatetime') -> bool:
return self.get_on_year() == other.get_on_year()
def __ne__(self, other: 'SatDatetime') -> bool:
return self.get_on_year() != other.get_on_year()
def __gt__(self, other: 'SatDatetime') -> bool:
return self.get_on_year() > other.get_on_year()
def __ge__(self, other: 'SatDatetime') -> bool:
return self.get_on_year() >= other.get_on_year()
def refresh_by_year(self, year):
year, month = int(year), (year - int(year)) * 8
month, day = int(month) + 1, (month - int(month)) * 22
day, hour = int(day) + 1, (day - int(day)) * 24
hour, minute = int(hour), (hour - int(hour)) * 60
minute, second = int(minute), (minute - int(minute)) * 60
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
def get_on_year(self):
minute = self.minute + self.second / 60
hour = self.hour + minute / 60
day = self.day - 1 + hour / 24
month = self.month - 1 + day / 22
year = self.year + month / 8
return year
def to_datetime(self):
c = self.get_on_year()
if c < 5000:
days_plus = c / 20 * 7
return datetime(2017, 12, 25) + timedelta(days=days_plus)
else:
return datetime(2009, 1, 31) + timedelta(days=c)
class SatTimedelta:
def __init__(self,
years: float = 0, months: float = 0, days: float = 0,
hours: float = 0, minutes: float = 0, seconds: float = 0):
self.years = years
self.months = months
self.days = days
self.hours = hours
self.minutes = minutes
self.seconds = seconds
seconds = self.get_in_seconds()
self.years = seconds // 15206400
seconds -= self.years * 15206400
self.months = seconds // 1900800
seconds -= self.months * 1900800
self.days = seconds // 86400
seconds -= self.days * 86400
self.hours = seconds // 3600
seconds -= self.hours * 3600
self.minutes = seconds // 60
seconds -= self.minutes * 60
self.seconds = seconds
def __str__(self):
        # Components may be floats after normalization; cast to int so the
        # formatted string doesn't raise for non-integer values
        return f'{int(self.days + self.months * 22 + self.years * 176)} days, ' \
               f'{int(self.hours)} hours {int(self.minutes)} minutes {self.seconds} seconds'
def __repr__(self):
return f'SatTimedelta({self.years}, {self.months}, {self.days}, {self.hours}, {self.minutes}, {self.seconds})'
def __add__(self, other: 'SatTimedelta') -> Union['SatTimedelta', SatDatetime]:
if isinstance(other, SatTimedelta):
return SatTimedelta(self.years + other.years, self.months + other.months, self.days + other.days,
self.hours + other.hours, self.minutes + other.minutes, self.seconds + other.seconds)
elif isinstance(other, SatDatetime):
return SatDatetime(self.years + other.year, self.months + other.month, self.days + other.day,
self.hours + other.hour, self.minutes + other.minute, self.seconds + other.second)
else:
raise TypeError(f'SatTimedelta can only be added to SatTimedelta or SatDatetime, not {type(other)}')
def __sub__(self, other: 'SatTimedelta') -> 'SatTimedelta':
if isinstance(other, SatTimedelta):
return SatTimedelta(self.years - other.years, self.months - other.months, self.days - other.days,
self.hours - other.hours, self.minutes - other.minutes, self.seconds - other.seconds)
else:
raise TypeError(f'SatTimedelta can only be subtracted from SatTimedelta, not {type(other)}')
def __mul__(self, other: float) -> 'SatTimedelta':
return SatTimedelta(self.years * other, self.months * other, self.days * other,
self.hours * other, self.minutes * other, self.seconds * other)
def __truediv__(self, other: float) -> 'SatTimedelta':
return SatTimedelta(self.years / other, self.months / other, self.days / other,
self.hours / other, self.minutes / other, self.seconds / other)
def __floordiv__(self, other: float) -> 'SatTimedelta':
return SatTimedelta(self.years // other, self.months // other, self.days // other,
self.hours // other, self.minutes // other, self.seconds // other)
def get_in_seconds(self):
return self.seconds \
+ self.minutes * 60 \
+ self.hours * 3600 \
+ self.days * 86400 \
+ self.months * 1900800 \
+ self.years * 15206400
def get_in_years(self) -> float:
return self.get_in_seconds() / 15206400
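# Illustrative round trip (a minimal sketch based on the conversion rules
# above):
#     sat_now = SatDatetime.get_from_datetime(datetime(2023, 1, 1))
#     later = sat_now + SatTimedelta(days=3)
#     print(later, '->', later.to_datetime())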
/sat_datetime-1.0.1-py3-none-any.whl/sat_datetime/sat_datetime.py
from datetime import date, datetime
from enum import Enum
from dataclasses import dataclass
from typing import List
class EstadoDTE(Enum):
TODOS = ""
VIGENTES = "V"
ANULADAS = "I"
class TypeFEL(Enum):
RECIBIDA = "R"
EMITIDA = "E"
@dataclass
class SatCredentials:
username: str
password: str
@dataclass
class SATFELFilters:
establecimiento: int
estadoDte: EstadoDTE
fechaInicio: date
fechaFin: date
tipo: TypeFEL
@dataclass
class Address:
street: str
zip_code: int
city: str
state: str
country: str = "GT"
@classmethod
def builder(cls):
return Builder(cls)
@dataclass
class ContactModel:
nit: str
commercial_name: str
address: Address
email: str
@classmethod
def builder(cls):
return Builder(cls)
@dataclass
class IssuingModel(ContactModel):
nit: str
commercial_name: str
issuing_name: str
address: str
vat_affiliation: str
establishment: str
@classmethod
def builder(cls):
return Builder(cls)
@dataclass
class Tax:
short_name: str
tax_code: str
taxable_ammount: float
tax_ammount: float
@dataclass
class InvoiceLine(object):
good_or_service: str
description: str
quantity: float
unit_price: float
total_line: float
total: float
line_number: int
discount: float
@classmethod
def builder(cls):
return Builder(cls)
@dataclass
class InvoiceHeaders:
    issue_date: date
invoice_type: str
issuer: IssuingModel
receiver: ContactModel
currency: str
@classmethod
def builder(cls):
return Builder(cls)
@dataclass
class TotalTax:
tax_name: str
tax_total: float
@classmethod
def builder(cls):
return Builder(cls)
@dataclass
class InvoiceTotals:
total_taxes: List[TotalTax]
grand_total: float
@classmethod
def builder(cls):
return Builder(cls)
@dataclass
class Invoice:
headers: InvoiceHeaders
lines: List[InvoiceLine]
totals: InvoiceTotals
fel_signature: str
fel_invoice_serie: str
fel_invoice_number: str
def __str__(self):
lines_format = "INVOICE LINES\n"
for line in self.lines:
lines_format += str(line)
lines_format += "\n"
return "{3} - {4} - {5}\n{0}\n{1}\n{2}".format(
self.headers,
lines_format,
self.totals,
self.fel_signature,
self.fel_invoice_serie,
self.fel_invoice_number,
)
@classmethod
def builder(cls):
return InvoiceBuilder(cls)
class Builder:
def __init__(self, cls):
self.attrs = {}
self.cls = cls
def __getattr__(self, name):
if name[0:3] == "set":
def setter(x):
field_name = name[4:]
self.attrs[field_name] = x
return self
return setter
else:
return None
def build(self):
return self.cls(**self.attrs)
class InvoiceBuilder(Builder):
def __init__(self, cls):
self.attrs = {}
self.cls = cls
def with_headers(self, headers):
self.attrs["headers"] = headers
return self
def with_lines(self, lines):
self.attrs["lines"] = lines
return self
def with_totals(self, totals):
self.attrs["totals"] = totals
return self
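# Illustrative builder usage (a minimal sketch; the field values are made
# up). Builder maps set_<field> calls onto constructor kwargs via __getattr__:
#     address = (Address.builder()
#                .set_street('5a avenida')
#                .set_zip_code(1)
#                .set_city('Guatemala')
#                .set_state('Guatemala')
#                .build())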
/sat_gt_fel_invoices_downloader-0.5.13.tar.gz/sat_gt_fel_invoices_downloader-0.5.13/src/sat_gt_fel_invoices_downloader/models.py
Six: Python 2 and 3 Compatibility Library
=========================================
.. module:: six
:synopsis: Python 2 and 3 compatibility
.. moduleauthor:: Benjamin Peterson <[email protected]>
.. sectionauthor:: Benjamin Peterson <[email protected]>
Six provides simple utilities for wrapping over differences between Python 2 and
Python 3. It is intended to support codebases that work on both Python 2 and 3
without modification. six consists of only one Python file, so it is painless
to copy into a project.
Six can be downloaded on `PyPI <https://pypi.org/project/six/>`_. Its bug
tracker and code hosting is on `GitHub <https://github.com/benjaminp/six>`_.
The name, "six", comes from the fact that 2*3 equals 6. Why not addition?
Multiplication is more powerful, and, anyway, "five" has already been snatched
away by the (admittedly now moribund) Zope Five project.
Indices and tables
------------------
* :ref:`genindex`
* :ref:`search`
Package contents
----------------
.. data:: PY2
A boolean indicating if the code is running on Python 2.
.. data:: PY3
A boolean indicating if the code is running on Python 3.
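For example, a minimal sketch of branching on the interpreter version::
    import six
    if six.PY2:
        print("running on Python 2")
    else:
        print("running on Python 3")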
Constants
>>>>>>>>>
Six provides constants that may differ between Python versions. Ones ending
``_types`` are mostly useful as the second argument to ``isinstance`` or
``issubclass``.
.. data:: class_types
Possible class types. In Python 2, this encompasses old-style
:data:`py2:types.ClassType` and new-style ``type`` classes. In Python 3,
this is just ``type``.
.. data:: integer_types
Possible integer types. In Python 2, this is :func:`py2:long` and
:func:`py2:int`, and in Python 3, just :func:`py3:int`.
.. data:: string_types
Possible types for text data. This is :func:`py2:basestring` in Python 2 and
:func:`py3:str` in Python 3.
.. data:: text_type
Type for representing (Unicode) textual data. This is :func:`py2:unicode` in
Python 2 and :func:`py3:str` in Python 3.
.. data:: binary_type
Type for representing binary data. This is :func:`py2:str` in Python 2 and
:func:`py3:bytes` in Python 3.
.. data:: MAXSIZE
The maximum size of a container like :func:`py3:list` or :func:`py3:dict`.
This is equivalent to :data:`py3:sys.maxsize` in Python 2.6 and later
(including 3.x). Note, this is temptingly similar to, but not the same as
:data:`py2:sys.maxint` in Python 2. There is no direct equivalent to
:data:`py2:sys.maxint` in Python 3 because its integer type has no limits
aside from memory.
Here's example usage of the module::
import six
def dispatch_types(value):
if isinstance(value, six.integer_types):
handle_integer(value)
elif isinstance(value, six.class_types):
handle_class(value)
elif isinstance(value, six.string_types):
handle_string(value)
Object model compatibility
>>>>>>>>>>>>>>>>>>>>>>>>>>
Python 3 renamed the attributes of several interpreter data structures. The
following accessors are available. Note that the recommended way to inspect
functions and methods is the stdlib :mod:`py3:inspect` module.
.. function:: get_unbound_function(meth)
Get the function out of unbound method *meth*. In Python 3, unbound methods
don't exist, so this function just returns *meth* unchanged. Example
usage::
from six import get_unbound_function
class X(object):
def method(self):
pass
method_function = get_unbound_function(X.method)
.. function:: get_method_function(meth)
Get the function out of method object *meth*.
.. function:: get_method_self(meth)
Get the ``self`` of bound method *meth*.
.. function:: get_function_closure(func)
Get the closure (list of cells) associated with *func*. This is equivalent
to ``func.__closure__`` on Python 2.6+ and ``func.func_closure`` on Python
2.5.
.. function:: get_function_code(func)
Get the code object associated with *func*. This is equivalent to
``func.__code__`` on Python 2.6+ and ``func.func_code`` on Python 2.5.
.. function:: get_function_defaults(func)
Get the defaults tuple associated with *func*. This is equivalent to
``func.__defaults__`` on Python 2.6+ and ``func.func_defaults`` on Python
2.5.
.. function:: get_function_globals(func)
Get the globals of *func*. This is equivalent to ``func.__globals__`` on
Python 2.6+ and ``func.func_globals`` on Python 2.5.
.. function:: next(it)
advance_iterator(it)
Get the next item of iterator *it*. :exc:`py3:StopIteration` is raised if
the iterator is exhausted. This is a replacement for calling ``it.next()``
in Python 2 and ``next(it)`` in Python 3. Python 2.6 and above have a
builtin ``next`` function, so six's version is only necessary for Python 2.5
compatibility.
.. function:: callable(obj)
Check if *obj* can be called. Note ``callable`` has returned in Python 3.2,
so using six's version is only necessary when supporting Python 3.0 or 3.1.
.. function:: iterkeys(dictionary, **kwargs)
Returns an iterator over *dictionary*\'s keys. This replaces
``dictionary.iterkeys()`` on Python 2 and ``dictionary.keys()`` on
Python 3. *kwargs* are passed through to the underlying method.
.. function:: itervalues(dictionary, **kwargs)
Returns an iterator over *dictionary*\'s values. This replaces
``dictionary.itervalues()`` on Python 2 and ``dictionary.values()`` on
Python 3. *kwargs* are passed through to the underlying method.
.. function:: iteritems(dictionary, **kwargs)
Returns an iterator over *dictionary*\'s items. This replaces
``dictionary.iteritems()`` on Python 2 and ``dictionary.items()`` on
Python 3. *kwargs* are passed through to the underlying method.
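For example, a small sketch that iterates portably over a dictionary::
    import six
    d = {"a": 1, "b": 2}
    for key, value in six.iteritems(d):
        print(key, value)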
.. function:: iterlists(dictionary, **kwargs)
Calls ``dictionary.iterlists()`` on Python 2 and ``dictionary.lists()`` on
Python 3. No builtin Python mapping type has such a method; this method is
intended for use with multi-valued dictionaries like `Werkzeug's
<http://werkzeug.pocoo.org/docs/datastructures/#werkzeug.datastructures.MultiDict>`_.
*kwargs* are passed through to the underlying method.
.. function:: viewkeys(dictionary)
Return a view over *dictionary*\'s keys. This replaces
:meth:`py2:dict.viewkeys` on Python 2.7 and :meth:`py3:dict.keys` on
Python 3.
.. function:: viewvalues(dictionary)
Return a view over *dictionary*\'s values. This replaces
:meth:`py2:dict.viewvalues` on Python 2.7 and :meth:`py3:dict.values` on
Python 3.
.. function:: viewitems(dictionary)
Return a view over *dictionary*\'s items. This replaces
:meth:`py2:dict.viewitems` on Python 2.7 and :meth:`py3:dict.items` on
Python 3.
.. function:: create_bound_method(func, obj)
Return a method object wrapping *func* and bound to *obj*. On both Python 2
and 3, this will return a :func:`py3:types.MethodType` object. The reason
this wrapper exists is that on Python 2, the ``MethodType`` constructor
requires the *obj*'s class to be passed.
.. function:: create_unbound_method(func, cls)
Return an unbound method object wrapping *func*. In Python 2, this will
return a :func:`py2:types.MethodType` object. In Python 3, unbound methods
do not exist and this wrapper will simply return *func*.
.. class:: Iterator
A class for making portable iterators. The intention is that it be subclassed
and subclasses provide a ``__next__`` method. In Python 2, :class:`Iterator`
has one method: ``next``. It simply delegates to ``__next__``. An alternate
way to do this would be to simply alias ``next`` to ``__next__``. However,
this interacts badly with subclasses that override
``__next__``. :class:`Iterator` is empty on Python 3. (In fact, it is just
aliased to :class:`py3:object`.)
.. decorator:: wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES)
This is exactly the :func:`py3:functools.wraps` decorator, but it sets the
``__wrapped__`` attribute on what it decorates as :func:`py3:functools.wraps`
does on Python versions after 3.2.
Syntax compatibility
>>>>>>>>>>>>>>>>>>>>
These functions smooth over operations which have different syntaxes between
Python 2 and 3.
.. function:: exec_(code, globals=None, locals=None)
Execute *code* in the scope of *globals* and *locals*. *code* can be a
string or a code object. If *globals* or *locals* are not given, they will
default to the scope of the caller. If just *globals* is given, it will also
be used as *locals*.
.. note::
Python 3's :func:`py3:exec` doesn't take keyword arguments, so calling
:func:`exec` with them should be avoided.
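For example, a minimal sketch::
    from six import exec_
    namespace = {}
    exec_("x = 40 + 2", namespace)
    assert namespace["x"] == 42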
.. function:: print_(*args, *, file=sys.stdout, end="\\n", sep=" ", flush=False)
Print *args* into *file*. Each argument will be separated with *sep* and
*end* will be written to the file after the last argument is printed. If
*flush* is true, ``file.flush()`` will be called after all data is written.
.. note::
In Python 2, this function imitates Python 3's :func:`py3:print` by not
having softspace support. If you don't know what that is, you're probably
ok. :)
.. function:: raise_from(exc_value, exc_value_from)
Raise an exception from a context. On Python 3, this is equivalent to
``raise exc_value from exc_value_from``. On Python 2, which does not support
exception chaining, it is equivalent to ``raise exc_value``.
.. function:: reraise(exc_type, exc_value, exc_traceback=None)
Reraise an exception, possibly with a different traceback. In the simple
case, ``reraise(*sys.exc_info())`` with an active exception (in an except
block) reraises the current exception with the last traceback. A different
traceback can be specified with the *exc_traceback* parameter. Note that
since the exception reraising is done within the :func:`reraise` function,
Python will attach the call frame of :func:`reraise` to whatever traceback is
raised.
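For example, a minimal sketch that re-raises the active exception::
    import sys
    import six
    try:
        raise ValueError("boom")
    except ValueError:
        six.reraise(*sys.exc_info())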
.. function:: with_metaclass(metaclass, *bases)
Create a new class with base classes *bases* and metaclass *metaclass*. This
is designed to be used in class declarations like this: ::
from six import with_metaclass
class Meta(type):
pass
class Base(object):
pass
class MyClass(with_metaclass(Meta, Base)):
pass
Another way to set a metaclass on a class is with the :func:`add_metaclass`
decorator.
.. decorator:: add_metaclass(metaclass)
Class decorator that replaces a normally-constructed class with a
metaclass-constructed one. Example usage: ::
@add_metaclass(Meta)
class MyClass(object):
pass
That code produces a class equivalent to ::
class MyClass(object, metaclass=Meta):
pass
on Python 3 or ::
class MyClass(object):
__metaclass__ = Meta
on Python 2.
Note that class decorators require Python 2.6. However, the effect of the
decorator can be emulated on Python 2.5 like so::
class MyClass(object):
pass
MyClass = add_metaclass(Meta)(MyClass)
Binary and text data
>>>>>>>>>>>>>>>>>>>>
Python 3 enforces the distinction between byte strings and text strings far more
rigorously than Python 2 does; binary data cannot be automatically coerced to
or from text data. six provides several functions to assist in classifying
string data in all Python versions.
.. function:: b(data)
A "fake" bytes literal. *data* should always be a normal string literal. In
Python 2, :func:`b` returns an 8-bit string. In Python 3, *data* is encoded
with the latin-1 encoding to bytes.
.. note::
Since all Python versions 2.6 and after support the ``b`` prefix,
code without 2.5 support doesn't need :func:`b`.
.. function:: u(text)
A "fake" unicode literal. *text* should always be a normal string literal.
In Python 2, :func:`u` returns unicode, and in Python 3, a string. Also, in
Python 2, the string is decoded with the ``unicode-escape`` codec, which
allows unicode escapes to be used in it.
.. note::
In Python 3.3, the ``u`` prefix has been reintroduced. Code that only
supports Python 3 versions of 3.3 and higher thus does not need
:func:`u`.
.. note::
On Python 2, :func:`u` doesn't know what the encoding of the literal
is. Each byte is converted directly to the unicode codepoint of the same
value. Because of this, it's only safe to use :func:`u` with strings of
ASCII data.
.. function:: unichr(c)
Return the (Unicode) string representing the codepoint *c*. This is
equivalent to :func:`py2:unichr` on Python 2 and :func:`py3:chr` on Python 3.
.. function:: int2byte(i)
Converts *i* to a byte. *i* must be in ``range(0, 256)``. This is
equivalent to :func:`py2:chr` in Python 2 and ``bytes((i,))`` in Python 3.
.. function:: byte2int(bs)
Converts the first byte of *bs* to an integer. This is equivalent to
``ord(bs[0])`` on Python 2 and ``bs[0]`` on Python 3.
.. function:: indexbytes(buf, i)
Return the byte at index *i* of *buf* as an integer. This is equivalent to
indexing a bytes object in Python 3.
.. function:: iterbytes(buf)
Return an iterator over bytes in *buf* as integers. This is equivalent to
a bytes object iterator in Python 3.
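For example, a small sketch of the byte helpers::
    import six
    buf = six.b("abc")
    assert six.byte2int(buf) == 97
    assert six.indexbytes(buf, 1) == 98
    assert list(six.iterbytes(buf)) == [97, 98, 99]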
.. function:: ensure_binary(s, encoding='utf-8', errors='strict')
Coerce *s* to :data:`binary_type`. *encoding*, *errors* are the same as
:meth:`py3:str.encode`
.. function:: ensure_str(s, encoding='utf-8', errors='strict')
Coerce *s* to ``str``. ``encoding``, ``errors`` are the same
:meth:`py3:str.encode`
.. function:: ensure_text(s, encoding='utf-8', errors='strict')
Coerce *s* to :data:`text_type`. *encoding*, *errors* are the same as
:meth:`py3:str.encode`
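For example, a minimal sketch::
    import six
    assert six.ensure_binary(u"abc") == b"abc"
    assert six.ensure_text(b"abc") == u"abc"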
.. data:: StringIO
This is a fake file object for textual data. It's an alias for
:class:`py2:StringIO.StringIO` in Python 2 and :class:`py3:io.StringIO` in
Python 3.
.. data:: BytesIO
This is a fake file object for binary data. In Python 2, it's an alias for
:class:`py2:StringIO.StringIO`, but in Python 3, it's an alias for
:class:`py3:io.BytesIO`.
.. decorator:: python_2_unicode_compatible
A class decorator that takes a class defining a ``__str__`` method. On
Python 3, the decorator does nothing. On Python 2, it aliases the
``__str__`` method to ``__unicode__`` and creates a new ``__str__`` method
that returns the result of ``__unicode__()`` encoded with UTF-8.
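Example usage (a minimal sketch)::
    import six
    @six.python_2_unicode_compatible
    class Greeting(object):
        def __str__(self):
            return u"hello"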
unittest assertions
>>>>>>>>>>>>>>>>>>>
Six contains compatibility shims for unittest assertions that have been renamed.
The parameters are the same as their aliases, but you must pass the test method
as the first argument. For example::
import six
import unittest
class TestAssertCountEqual(unittest.TestCase):
def test(self):
six.assertCountEqual(self, (1, 2), [2, 1])
Note these functions are only available on Python 2.7 or later.
.. function:: assertCountEqual()
Alias for :meth:`~py3:unittest.TestCase.assertCountEqual` on Python 3 and
:meth:`~py2:unittest.TestCase.assertItemsEqual` on Python 2.
.. function:: assertRaisesRegex()
Alias for :meth:`~py3:unittest.TestCase.assertRaisesRegex` on Python 3 and
:meth:`~py2:unittest.TestCase.assertRaisesRegexp` on Python 2.
.. function:: assertRegex()
Alias for :meth:`~py3:unittest.TestCase.assertRegex` on Python 3 and
:meth:`~py2:unittest.TestCase.assertRegexpMatches` on Python 2.
Renamed modules and attributes compatibility
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
.. module:: six.moves
:synopsis: Renamed modules and attributes compatibility
Python 3 reorganized the standard library and moved several functions to
different modules. Six provides a consistent interface to them through the fake
:mod:`six.moves` module. For example, to load the module for parsing HTML on
Python 2 or 3, write::
from six.moves import html_parser
Similarly, to get the function to reload modules, which was moved from the
builtin module to the ``importlib`` module, use::
from six.moves import reload_module
For the most part, :mod:`six.moves` aliases are the names of the modules in
Python 3. When the new Python 3 name is a package, the components of the name
are separated by underscores. For example, ``html.parser`` becomes
``html_parser``. In some cases where several modules have been combined, the
Python 2 name is retained. This is so the appropriate modules can be found when
running on Python 2. For example, ``BaseHTTPServer`` which is in
``http.server`` in Python 3 is aliased as ``BaseHTTPServer``.
Some modules which had two implementations have been merged in Python 3. For
example, ``cPickle`` no longer exists in Python 3; it was merged with
``pickle``. In these cases, fetching the fast version will load the fast one on
Python 2 and the merged module in Python 3.
The :mod:`py2:urllib`, :mod:`py2:urllib2`, and :mod:`py2:urlparse` modules have
been combined in the :mod:`py3:urllib` package in Python 3. The
:mod:`six.moves.urllib` package is a version-independent location for this
functionality; its structure mimics the structure of the Python 3
:mod:`py3:urllib` package.
.. note::
In order to make imports of the form::
from six.moves.cPickle import loads
work, six places special proxy objects in :data:`py3:sys.modules`. These
proxies lazily load the underlying module when an attribute is fetched. This
will fail if the underlying module is not available in the Python
interpreter. For example, ``sys.modules["six.moves.winreg"].LoadKey`` would
fail on any non-Windows platform. Unfortunately, some applications try to
load attributes on every module in :data:`py3:sys.modules`. six mitigates
this problem for some applications by pretending attributes on unimportable
modules do not exist. This hack does not work in every case, though. If you are
encountering problems with the lazy modules and don't use any from imports
directly from ``six.moves`` modules, you can workaround the issue by removing
the six proxy modules::
d = [name for name in sys.modules if name.startswith("six.moves.")]
for name in d:
del sys.modules[name]
Supported renames:
+------------------------------+-------------------------------------+---------------------------------------+
| Name | Python 2 name | Python 3 name |
+==============================+=====================================+=======================================+
| ``builtins`` | :mod:`py2:__builtin__` | :mod:`py3:builtins` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``configparser`` | :mod:`py2:ConfigParser` | :mod:`py3:configparser` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``copyreg`` | :mod:`py2:copy_reg` | :mod:`py3:copyreg` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``cPickle`` | :mod:`py2:cPickle` | :mod:`py3:pickle` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``cStringIO`` | :func:`py2:cStringIO.StringIO` | :class:`py3:io.StringIO` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``dbm_gnu`` | :func:`py2:gdbm` | :class:`py3:dbm.gnu` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``_dummy_thread`` | :mod:`py2:dummy_thread` | :mod:`py3:_dummy_thread` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``email_mime_base`` | :mod:`py2:email.MIMEBase` | :mod:`py3:email.mime.base` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``email_mime_image`` | :mod:`py2:email.MIMEImage` | :mod:`py3:email.mime.image` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``email_mime_multipart`` | :mod:`py2:email.MIMEMultipart` | :mod:`py3:email.mime.multipart` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``email_mime_nonmultipart`` | :mod:`py2:email.MIMENonMultipart` | :mod:`py3:email.mime.nonmultipart` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``email_mime_text`` | :mod:`py2:email.MIMEText` | :mod:`py3:email.mime.text` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``filter`` | :func:`py2:itertools.ifilter` | :func:`py3:filter` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``filterfalse`` | :func:`py2:itertools.ifilterfalse` | :func:`py3:itertools.filterfalse` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``getcwd`` | :func:`py2:os.getcwdu` | :func:`py3:os.getcwd` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``getcwdb`` | :func:`py2:os.getcwd` | :func:`py3:os.getcwdb` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``getoutput`` | :func:`py2:commands.getoutput` | :func:`py3:subprocess.getoutput` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``http_cookiejar`` | :mod:`py2:cookielib` | :mod:`py3:http.cookiejar` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``http_cookies`` | :mod:`py2:Cookie` | :mod:`py3:http.cookies` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``html_entities`` | :mod:`py2:htmlentitydefs` | :mod:`py3:html.entities` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``html_parser`` | :mod:`py2:HTMLParser` | :mod:`py3:html.parser` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``http_client`` | :mod:`py2:httplib` | :mod:`py3:http.client` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``BaseHTTPServer`` | :mod:`py2:BaseHTTPServer` | :mod:`py3:http.server` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``CGIHTTPServer`` | :mod:`py2:CGIHTTPServer` | :mod:`py3:http.server` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``SimpleHTTPServer`` | :mod:`py2:SimpleHTTPServer` | :mod:`py3:http.server` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``input`` | :func:`py2:raw_input` | :func:`py3:input` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``intern`` | :func:`py2:intern` | :func:`py3:sys.intern` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``map`` | :func:`py2:itertools.imap` | :func:`py3:map` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``queue`` | :mod:`py2:Queue` | :mod:`py3:queue` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``range`` | :func:`py2:xrange` | :func:`py3:range` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``reduce`` | :func:`py2:reduce` | :func:`py3:functools.reduce` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``reload_module`` | :func:`py2:reload` | :func:`py3:imp.reload`, |
| | | :func:`py3:importlib.reload` |
| | | on Python 3.4+ |
+------------------------------+-------------------------------------+---------------------------------------+
| ``reprlib`` | :mod:`py2:repr` | :mod:`py3:reprlib` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``shlex_quote`` | :mod:`py2:pipes.quote` | :mod:`py3:shlex.quote` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``socketserver`` | :mod:`py2:SocketServer` | :mod:`py3:socketserver` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``_thread`` | :mod:`py2:thread` | :mod:`py3:_thread` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter`` | :mod:`py2:Tkinter` | :mod:`py3:tkinter` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_dialog`` | :mod:`py2:Dialog` | :mod:`py3:tkinter.dialog` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_filedialog``       | :mod:`py2:FileDialog`               | :mod:`py3:tkinter.filedialog`         |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_scrolledtext`` | :mod:`py2:ScrolledText` | :mod:`py3:tkinter.scrolledtext` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_simpledialog`` | :mod:`py2:SimpleDialog` | :mod:`py3:tkinter.simpledialog` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_ttk`` | :mod:`py2:ttk` | :mod:`py3:tkinter.ttk` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_tix`` | :mod:`py2:Tix` | :mod:`py3:tkinter.tix` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_constants`` | :mod:`py2:Tkconstants` | :mod:`py3:tkinter.constants` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_dnd`` | :mod:`py2:Tkdnd` | :mod:`py3:tkinter.dnd` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_colorchooser`` | :mod:`py2:tkColorChooser` | :mod:`py3:tkinter.colorchooser` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_commondialog`` | :mod:`py2:tkCommonDialog` | :mod:`py3:tkinter.commondialog` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_tkfiledialog`` | :mod:`py2:tkFileDialog` | :mod:`py3:tkinter.filedialog` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_font`` | :mod:`py2:tkFont` | :mod:`py3:tkinter.font` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_messagebox`` | :mod:`py2:tkMessageBox` | :mod:`py3:tkinter.messagebox` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``tkinter_tksimpledialog`` | :mod:`py2:tkSimpleDialog` | :mod:`py3:tkinter.simpledialog` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``urllib.parse`` | See :mod:`six.moves.urllib.parse` | :mod:`py3:urllib.parse` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``urllib.error`` | See :mod:`six.moves.urllib.error` | :mod:`py3:urllib.error` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``urllib.request`` | See :mod:`six.moves.urllib.request` | :mod:`py3:urllib.request` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``urllib.response`` | See :mod:`six.moves.urllib.response`| :mod:`py3:urllib.response` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``urllib.robotparser`` | :mod:`py2:robotparser` | :mod:`py3:urllib.robotparser` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``urllib_robotparser`` | :mod:`py2:robotparser` | :mod:`py3:urllib.robotparser` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``UserDict`` | :class:`py2:UserDict.UserDict` | :class:`py3:collections.UserDict` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``UserList`` | :class:`py2:UserList.UserList` | :class:`py3:collections.UserList` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``UserString`` | :class:`py2:UserString.UserString` | :class:`py3:collections.UserString` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``winreg`` | :mod:`py2:_winreg` | :mod:`py3:winreg` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``xmlrpc_client`` | :mod:`py2:xmlrpclib` | :mod:`py3:xmlrpc.client` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``xmlrpc_server`` | :mod:`py2:SimpleXMLRPCServer` | :mod:`py3:xmlrpc.server` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``xrange`` | :func:`py2:xrange` | :func:`py3:range` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``zip`` | :func:`py2:itertools.izip` | :func:`py3:zip` |
+------------------------------+-------------------------------------+---------------------------------------+
| ``zip_longest`` | :func:`py2:itertools.izip_longest` | :func:`py3:itertools.zip_longest` |
+------------------------------+-------------------------------------+---------------------------------------+
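For example, portable code can import the renamed objects once through
:mod:`six.moves` and use them unchanged on both Python versions (a minimal
sketch, assuming ``six`` is installed)::

    from six.moves import range, queue

    q = queue.Queue()      # Py2: Queue.Queue, Py3: queue.Queue
    for i in range(3):     # Py2: xrange, Py3: range
        q.put(i)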
urllib parse
<<<<<<<<<<<<
.. module:: six.moves.urllib.parse
:synopsis: Stuff from :mod:`py2:urlparse` and :mod:`py2:urllib` in Python 2 and :mod:`py3:urllib.parse` in Python 3
Contains functions from Python 3's :mod:`py3:urllib.parse` and Python 2's:
:mod:`py2:urlparse`:
* :func:`py2:urlparse.ParseResult`
* :func:`py2:urlparse.SplitResult`
* :func:`py2:urlparse.urlparse`
* :func:`py2:urlparse.urlunparse`
* :func:`py2:urlparse.parse_qs`
* :func:`py2:urlparse.parse_qsl`
* :func:`py2:urlparse.urljoin`
* :func:`py2:urlparse.urldefrag`
* :func:`py2:urlparse.urlsplit`
* :func:`py2:urlparse.urlunsplit`
* :func:`py2:urlparse.splitquery`
* :func:`py2:urlparse.uses_fragment`
* :func:`py2:urlparse.uses_netloc`
* :func:`py2:urlparse.uses_params`
* :func:`py2:urlparse.uses_query`
* :func:`py2:urlparse.uses_relative`
and :mod:`py2:urllib`:
* :func:`py2:urllib.quote`
* :func:`py2:urllib.quote_plus`
* :func:`py2:urllib.splittag`
* :func:`py2:urllib.splituser`
* :func:`py2:urllib.splitvalue`
* :func:`py2:urllib.unquote` (also exposed as :func:`py3:urllib.parse.unquote_to_bytes`)
* :func:`py2:urllib.unquote_plus`
* :func:`py2:urllib.urlencode`
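A minimal sketch of using the merged namespace (again assuming ``six`` is
installed)::

    from six.moves.urllib.parse import urlparse, urlencode

    parts = urlparse('http://example.com/path?a=1')  # works on Py2 and Py3
    query = urlencode({'b': 2})                      # 'b=2'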
urllib error
<<<<<<<<<<<<
.. module:: six.moves.urllib.error
:synopsis: Stuff from :mod:`py2:urllib` and :mod:`py2:urllib2` in Python 2 and :mod:`py3:urllib.error` in Python 3
Contains exceptions from Python 3's :mod:`py3:urllib.error` and Python 2's:
:mod:`py2:urllib`:
* :exc:`py2:urllib.ContentTooShortError`
and :mod:`py2:urllib2`:
* :exc:`py2:urllib2.URLError`
* :exc:`py2:urllib2.HTTPError`
urllib request
<<<<<<<<<<<<<<
.. module:: six.moves.urllib.request
:synopsis: Stuff from :mod:`py2:urllib` and :mod:`py2:urllib2` in Python 2 and :mod:`py3:urllib.request` in Python 3
Contains items from Python 3's :mod:`py3:urllib.request` and Python 2's:
:mod:`py2:urllib`:
* :func:`py2:urllib.pathname2url`
* :func:`py2:urllib.url2pathname`
* :func:`py2:urllib.getproxies`
* :func:`py2:urllib.urlretrieve`
* :func:`py2:urllib.urlcleanup`
* :class:`py2:urllib.URLopener`
* :class:`py2:urllib.FancyURLopener`
* :func:`py2:urllib.proxy_bypass`
and :mod:`py2:urllib2`:
* :func:`py2:urllib2.urlopen`
* :func:`py2:urllib2.install_opener`
* :func:`py2:urllib2.build_opener`
* :func:`py2:urllib2.parse_http_list`
* :func:`py2:urllib2.parse_keqv_list`
* :class:`py2:urllib2.Request`
* :class:`py2:urllib2.OpenerDirector`
* :class:`py2:urllib2.HTTPDefaultErrorHandler`
* :class:`py2:urllib2.HTTPRedirectHandler`
* :class:`py2:urllib2.HTTPCookieProcessor`
* :class:`py2:urllib2.ProxyHandler`
* :class:`py2:urllib2.BaseHandler`
* :class:`py2:urllib2.HTTPPasswordMgr`
* :class:`py2:urllib2.HTTPPasswordMgrWithDefaultRealm`
* :class:`py2:urllib2.AbstractBasicAuthHandler`
* :class:`py2:urllib2.HTTPBasicAuthHandler`
* :class:`py2:urllib2.ProxyBasicAuthHandler`
* :class:`py2:urllib2.AbstractDigestAuthHandler`
* :class:`py2:urllib2.HTTPDigestAuthHandler`
* :class:`py2:urllib2.ProxyDigestAuthHandler`
* :class:`py2:urllib2.HTTPHandler`
* :class:`py2:urllib2.HTTPSHandler`
* :class:`py2:urllib2.FileHandler`
* :class:`py2:urllib2.FTPHandler`
* :class:`py2:urllib2.CacheFTPHandler`
* :class:`py2:urllib2.UnknownHandler`
* :class:`py2:urllib2.HTTPErrorProcessor`
urllib response
<<<<<<<<<<<<<<<
.. module:: six.moves.urllib.response
:synopsis: Stuff from :mod:`py2:urllib` in Python 2 and :mod:`py3:urllib.response` in Python 3
Contains classes from Python 3's :mod:`py3:urllib.response` and Python 2's:
:mod:`py2:urllib`:
* :class:`py2:urllib.addbase`
* :class:`py2:urllib.addclosehook`
* :class:`py2:urllib.addinfo`
* :class:`py2:urllib.addinfourl`
Advanced - Customizing renames
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
.. currentmodule:: six
It is possible to add additional names to the :mod:`six.moves` namespace.
.. function:: add_move(item)
Add *item* to the :mod:`six.moves` mapping. *item* should be a
:class:`MovedAttribute` or :class:`MovedModule` instance.
.. function:: remove_move(name)
Remove the :mod:`six.moves` mapping called *name*. *name* should be a
string.
Instances of the following classes can be passed to :func:`add_move`. Neither
have any public members.
.. class:: MovedModule(name, old_mod, new_mod)
Create a mapping for :mod:`six.moves` called *name* that references different
modules in Python 2 and 3. *old_mod* is the name of the Python 2 module.
*new_mod* is the name of the Python 3 module.
.. class:: MovedAttribute(name, old_mod, new_mod, old_attr=None, new_attr=None)
Create a mapping for :mod:`six.moves` called *name* that references different
attributes in Python 2 and 3. *old_mod* is the name of the Python 2 module.
*new_mod* is the name of the Python 3 module. If *new_attr* is not given, it
defaults to *old_attr*. If neither is given, they both default to *name*.
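For example, one could expose the ``mock`` backport under :mod:`six.moves` so
that Python 3 code picks up :mod:`py3:unittest.mock` instead (a sketch; the
``mock`` mapping is only an illustration and requires the backport to be
installed on Python 2)::

    import six

    six.add_move(six.MovedModule('mock', 'mock', 'unittest.mock'))
    from six.moves import mock  # Py2: mock, Py3: unittest.mock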
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/six/documentation/index.rst
import logging
from pyu2f import apdu
from pyu2f import errors
class SecurityKey(object):
"""Low level api for talking to a security key.
This class implements the low level api specified in FIDO
U2F for talking to a security key.
"""
def __init__(self, transport):
self.transport = transport
self.use_legacy_format = False
self.logger = logging.getLogger('pyu2f.hardware')
def CmdRegister(self, challenge_param, app_param):
"""Register security key.
Ask the security key to register with a particular origin & client.
Args:
challenge_param: Arbitrary 32 byte challenge string.
app_param: Arbitrary 32 byte application parameter.
Returns:
A binary structure containing the key handle, attestation, and a
signature over that by the attestation key. The precise format
is dictated by the FIDO U2F specs.
Raises:
TUPRequiredError: A Test of User Presence is required to proceed.
ApduError: Something went wrong on the device.
"""
self.logger.debug('CmdRegister')
if len(challenge_param) != 32 or len(app_param) != 32:
raise errors.InvalidRequestError()
body = bytearray(challenge_param + app_param)
response = self.InternalSendApdu(apdu.CommandApdu(
0,
apdu.CMD_REGISTER,
0x03, # Per the U2F reference code tests
0x00,
body))
response.CheckSuccessOrRaise()
return response.body
def CmdAuthenticate(self,
challenge_param,
app_param,
key_handle,
check_only=False):
"""Attempt to obtain an authentication signature.
Ask the security key to sign a challenge for a particular key handle
in order to authenticate the user.
Args:
challenge_param: SHA-256 hash of client_data object as a bytes
object.
app_param: SHA-256 hash of the app id as a bytes object.
key_handle: The key handle to use to issue the signature as a bytes
object.
check_only: If true, only check if key_handle is valid.
Returns:
A binary structure containing the user-presence byte, the usage
counter, and a signature over the challenge by the key identified
by key_handle. The precise format is dictated by the FIDO U2F specs.
Raises:
TUPRequiredError: If check_only is False, a Test of User Presence
is required to proceed. If check_only is True, this means
the key_handle is valid.
InvalidKeyHandleError: The key_handle is not valid for this device.
ApduError: Something else went wrong on the device.
"""
self.logger.debug('CmdAuthenticate')
if len(challenge_param) != 32 or len(app_param) != 32:
raise errors.InvalidRequestError()
control = 0x07 if check_only else 0x03
body = bytearray(challenge_param + app_param +
bytearray([len(key_handle)]) + key_handle)
response = self.InternalSendApdu(apdu.CommandApdu(
0, apdu.CMD_AUTH, control, 0x00, body))
response.CheckSuccessOrRaise()
return response.body
def CmdVersion(self):
"""Obtain the version of the device and test transport format.
Obtains the version of the device and determines whether to use ISO
7816-4 or the legacy U2F variant. This function should be called at least once
before CmdAuthenticate or CmdRegister to make sure the object is using the
proper transport for the device.
Returns:
The version of the U2F protocol in use.
"""
self.logger.debug('CmdVersion')
response = self.InternalSendApdu(apdu.CommandApdu(
0, apdu.CMD_VERSION, 0x00, 0x00))
if not response.IsSuccess():
raise errors.ApduError(response.sw1, response.sw2)
return response.body
def CmdBlink(self, time):
self.logger.debug('CmdBlink')
self.transport.SendBlink(time)
def CmdWink(self):
self.logger.debug('CmdWink')
self.transport.SendWink()
def CmdPing(self, data):
self.logger.debug('CmdPing')
return self.transport.SendPing(data)
def InternalSendApdu(self, apdu_to_send):
"""Send an APDU to the device.
Sends an APDU to the device, possibly falling back to the legacy
encoding format that is not ISO7816-4 compatible.
Args:
apdu_to_send: The CommandApdu object to send
Returns:
The ResponseApdu object constructed out of the device's reply.
"""
response = None
if not self.use_legacy_format:
response = apdu.ResponseApdu(self.transport.SendMsgBytes(
apdu_to_send.ToByteArray()))
if response.sw1 == 0x67 and response.sw2 == 0x00:
# If we failed using the standard format, retry with the
# legacy format.
self.use_legacy_format = True
return self.InternalSendApdu(apdu_to_send)
else:
response = apdu.ResponseApdu(self.transport.SendMsgBytes(
apdu_to_send.ToLegacyU2FByteArray()))
return response
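# A minimal usage sketch (the `transport` below is hypothetical; any object
# implementing SendMsgBytes/SendBlink/SendWink/SendPing will do, e.g. one
# obtained from pyu2f.hidtransport):
#
#   sk = SecurityKey(transport)
#   sk.CmdVersion()  # negotiate the transport format first
#   registration_data = sk.CmdRegister(challenge_param, app_param)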
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyu2f/pyu2f/hardware.py
import hashlib
import socket
import time
from pyu2f import errors
from pyu2f import hardware
from pyu2f import hidtransport
from pyu2f import model
def GetLocalU2FInterface(origin=socket.gethostname()):
"""Obtains a U2FInterface for the first valid local U2FHID device found."""
hid_transports = hidtransport.DiscoverLocalHIDU2FDevices()
for t in hid_transports:
try:
return U2FInterface(security_key=hardware.SecurityKey(transport=t),
origin=origin)
except errors.UnsupportedVersionException:
# Skip over devices that don't speak the proper version of the protocol.
pass
# Unable to find a device
raise errors.NoDeviceFoundError()
class U2FInterface(object):
"""High level U2F interface.
Implements a high level interface in the spirit of the FIDO U2F
javascript API high level interface. It supports registration
and authentication (signing).
IMPORTANT NOTE: This class does NOT validate the app id against the
origin. In particular, any user can assert any app id all the way to
the device. The security model of a python library is such that doing
so would not provide significant benefit as it could be bypassed by the
caller talking to a lower level of the API. In fact, so could the origin
itself. The origin is still set to a plausible value (the hostname) by
this library.
TODO(gdasher): Figure out a plan on how to address this gap/document the
consequences of this more clearly.
"""
def __init__(self, security_key, origin=socket.gethostname()):
self.origin = origin
self.security_key = security_key
if self.security_key.CmdVersion() != b'U2F_V2':
raise errors.UnsupportedVersionException()
def Register(self, app_id, challenge, registered_keys):
"""Registers app_id with the security key.
Executes the U2F registration flow with the security key.
Args:
app_id: The app_id to register the security key against.
challenge: Server challenge passed to the security key.
registered_keys: List of keys already registered for this app_id+user.
Returns:
RegisterResponse with key_handle and attestation information in it (
encoded in FIDO U2F binary format within registration_data field).
Raises:
U2FError: There was some kind of problem with registration (e.g.
the device was already registered or there was a timeout waiting
for the test of user presence).
"""
client_data = model.ClientData(model.ClientData.TYP_REGISTRATION, challenge,
self.origin)
challenge_param = self.InternalSHA256(client_data.GetJson())
app_param = self.InternalSHA256(app_id)
for key in registered_keys:
try:
# skip non U2F_V2 keys
if key.version != u'U2F_V2':
continue
resp = self.security_key.CmdAuthenticate(challenge_param, app_param,
key.key_handle, True)
# check_only mode CmdAuthenticate should always raise some
# exception
raise errors.HardwareError('Should Never Happen')
except errors.TUPRequiredError:
# This indicates key was valid. Thus, no need to register
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
except errors.InvalidKeyHandleError as e:
# This is the case of a key for a different token, so we just ignore it.
pass
except errors.HardwareError as e:
raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
# Now register the new key
for _ in range(30):
try:
resp = self.security_key.CmdRegister(challenge_param, app_param)
return model.RegisterResponse(resp, client_data)
except errors.TUPRequiredError as e:
self.security_key.CmdWink()
time.sleep(0.5)
except errors.HardwareError as e:
raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
raise errors.U2FError(errors.U2FError.TIMEOUT)
def Authenticate(self, app_id, challenge, registered_keys):
"""Authenticates app_id with the security key.
Executes the U2F authentication/signature flow with the security key.
Args:
app_id: The app_id to register the security key against.
challenge: Server challenge passed to the security key as a bytes object.
registered_keys: List of keys already registered for this app_id+user.
Returns:
SignResponse with client_data, key_handle, and signature_data. The client
data is an object, while the signature_data is encoded in FIDO U2F binary
format.
Raises:
U2FError: There was some kind of problem with authentication (e.g.
there was a timeout while waiting for the test of user presence.)
"""
client_data = model.ClientData(model.ClientData.TYP_AUTHENTICATION,
challenge, self.origin)
app_param = self.InternalSHA256(app_id)
challenge_param = self.InternalSHA256(client_data.GetJson())
num_invalid_keys = 0
for key in registered_keys:
try:
if key.version != u'U2F_V2':
continue
for _ in range(30):
try:
resp = self.security_key.CmdAuthenticate(challenge_param, app_param,
key.key_handle)
return model.SignResponse(key.key_handle, resp, client_data)
except errors.TUPRequiredError:
self.security_key.CmdWink()
time.sleep(0.5)
except errors.InvalidKeyHandleError:
num_invalid_keys += 1
continue
except errors.HardwareError as e:
raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
if num_invalid_keys == len(registered_keys):
# In this case, all provided keys were invalid.
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
# In this case, the TUP was not pressed.
raise errors.U2FError(errors.U2FError.TIMEOUT)
def InternalSHA256(self, string):
md = hashlib.sha256()
md.update(string.encode())
return md.digest()
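# Typical flow (sketch): register a key, then authenticate with it. The
# app_id/challenge values and the registered_keys contents are illustrative;
# see pyu2f.model for the exact RegisteredKey fields.
#
#   interface = GetLocalU2FInterface()
#   reg_resp = interface.Register(app_id, challenge, registered_keys=[])
#   sign_resp = interface.Authenticate(app_id, challenge, registered_keys)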
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyu2f/pyu2f/u2f.py
"""Implements raw HID interface on Linux using SysFS and device files."""
from __future__ import division
import os
import struct
from pyu2f import errors
from pyu2f.hid import base
REPORT_DESCRIPTOR_KEY_MASK = 0xfc
LONG_ITEM_ENCODING = 0xfe
OUTPUT_ITEM = 0x90
INPUT_ITEM = 0x80
COLLECTION_ITEM = 0xa0
REPORT_COUNT = 0x94
REPORT_SIZE = 0x74
USAGE_PAGE = 0x04
USAGE = 0x08
def GetValueLength(rd, pos):
"""Get value length for a key in rd.
For a key at position pos in the Report Descriptor rd, return the length
of the associated value. This supports both short and long format
values.
Args:
rd: Report Descriptor
pos: The position of the key in rd.
Returns:
(key_size, data_len) where key_size is the number of bytes occupied by
the key and data_len is the length of the value associated by the key.
"""
rd = bytearray(rd)
key = rd[pos]
if key == LONG_ITEM_ENCODING:
# If the key is tagged as a long item (0xfe), then the format is
# [key (1 byte)] [data len (1 byte)] [item tag (1 byte)] [data (n bytes)].
# Thus, the entire key record is 3 bytes long.
if pos + 1 < len(rd):
return (3, rd[pos + 1])
else:
raise errors.HidError('Malformed report descriptor')
else:
# If the key is tagged as a short item, then the item tag and data len are
# packed into one byte. The format is thus:
# [tag (high 4 bits)] [type (2 bits)] [size code (2 bits)] [data (n bytes)].
# The size code specifies 1, 2, or 4 bytes (0x03 means 4 bytes).
code = key & 0x03
if code <= 0x02:
return (1, code)
elif code == 0x03:
return (1, 4)
raise errors.HidError('Cannot happen')
def ReadLsbBytes(rd, offset, value_size):
"""Reads value_size bytes from rd at offset, least signifcant byte first."""
encoding = None
if value_size == 1:
encoding = '<B'
elif value_size == 2:
encoding = '<H'
elif value_size == 4:
encoding = '<L'
else:
raise errors.HidError('Invalid value size specified')
ret, = struct.unpack(encoding, rd[offset:offset + value_size])
return ret
class NoReportCountFound(Exception):
pass
def ParseReportDescriptor(rd, desc):
"""Parse the binary report descriptor.
Parse the binary report descriptor into a DeviceDescriptor object.
Args:
rd: The binary report descriptor
desc: The DeviceDescriptor object to update with the results
from parsing the descriptor.
Returns:
None
"""
rd = bytearray(rd)
pos = 0
report_count = None
report_size = None
usage_page = None
usage = None
while pos < len(rd):
key = rd[pos]
# First step, determine the value encoding (either long or short).
key_size, value_length = GetValueLength(rd, pos)
if key & REPORT_DESCRIPTOR_KEY_MASK == INPUT_ITEM:
if report_count and report_size:
byte_length = (report_count * report_size) // 8
desc.internal_max_in_report_len = max(
desc.internal_max_in_report_len, byte_length)
report_count = None
report_size = None
elif key & REPORT_DESCRIPTOR_KEY_MASK == OUTPUT_ITEM:
if report_count and report_size:
byte_length = (report_count * report_size) // 8
desc.internal_max_out_report_len = max(
desc.internal_max_out_report_len, byte_length)
report_count = None
report_size = None
elif key & REPORT_DESCRIPTOR_KEY_MASK == COLLECTION_ITEM:
if usage_page:
desc.usage_page = usage_page
if usage:
desc.usage = usage
elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_COUNT:
if len(rd) >= pos + 1 + value_length:
report_count = ReadLsbBytes(rd, pos + 1, value_length)
elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_SIZE:
if len(rd) >= pos + 1 + value_length:
report_size = ReadLsbBytes(rd, pos + 1, value_length)
elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE_PAGE:
if len(rd) >= pos + 1 + value_length:
usage_page = ReadLsbBytes(rd, pos + 1, value_length)
elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE:
if len(rd) >= pos + 1 + value_length:
usage = ReadLsbBytes(rd, pos + 1, value_length)
pos += value_length + key_size
return desc
def ParseUevent(uevent, desc):
lines = uevent.split(b'\n')
for line in lines:
line = line.strip()
if not line:
continue
k, v = line.split(b'=')
if k == b'HID_NAME':
desc.product_string = v.decode('utf8')
elif k == b'HID_ID':
_, vid, pid = v.split(b':')
desc.vendor_id = int(vid, 16)
desc.product_id = int(pid, 16)
class LinuxHidDevice(base.HidDevice):
"""Implementation of HID device for linux.
Implementation of HID device interface for linux that uses block
devices to interact with the device and sysfs to enumerate/discover
device metadata.
"""
@staticmethod
def Enumerate():
for hidraw in os.listdir('/sys/class/hidraw'):
rd_path = (
os.path.join(
'/sys/class/hidraw', hidraw,
'device/report_descriptor'))
uevent_path = os.path.join('/sys/class/hidraw', hidraw, 'device/uevent')
rd_file = open(rd_path, 'rb')
uevent_file = open(uevent_path, 'rb')
desc = base.DeviceDescriptor()
desc.path = os.path.join('/dev/', hidraw)
ParseReportDescriptor(rd_file.read(), desc)
ParseUevent(uevent_file.read(), desc)
rd_file.close()
uevent_file.close()
yield desc.ToPublicDict()
def __init__(self, path):
base.HidDevice.__init__(self, path)
self.dev = os.open(path, os.O_RDWR)
self.desc = base.DeviceDescriptor()
self.desc.path = path
rd_file = open(os.path.join('/sys/class/hidraw',
os.path.basename(path),
'device/report_descriptor'), 'rb')
ParseReportDescriptor(rd_file.read(), self.desc)
rd_file.close()
def GetInReportDataLength(self):
"""See base class."""
return self.desc.internal_max_in_report_len
def GetOutReportDataLength(self):
"""See base class."""
return self.desc.internal_max_out_report_len
def Write(self, packet):
"""See base class."""
out = bytearray([0] + packet) # Prepend the zero-byte (report ID)
os.write(self.dev, out)
def Read(self):
"""See base class."""
raw_in = os.read(self.dev, self.GetInReportDataLength())
decoded_in = list(bytearray(raw_in))
return decoded_in
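# Enumeration sketch (the dict keys shown are illustrative; they mirror the
# public fields of base.DeviceDescriptor):
#
#   for desc in LinuxHidDevice.Enumerate():
#       print(desc['path'], desc['vendor_id'], desc['product_id'])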
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyu2f/pyu2f/hid/linux.py
.. _usage:
Usage
=====
This section describes the usage of the Python-RSA module.
Before you can use RSA you need keys. You will receive a private key
and a public key.
.. important::
The private key is called *private* for a reason. Never share this
key with anyone.
The public key is used for encrypting a message such that it can only
be read by the owner of the private key. As such it's also referred to
as the *encryption key*. Decrypting a message can only be done using
the private key, hence it's also called the *decryption key*.
The private key is used for signing a message. With this signature and
the public key, the receiver can verify that a message was signed
by the owner of the private key, and that the message was not modified
after signing.
Generating keys
---------------
You can use the :py:func:`rsa.newkeys` function to create a keypair:
>>> import rsa
>>> (pubkey, privkey) = rsa.newkeys(512)
Alternatively you can use :py:meth:`rsa.PrivateKey.load_pkcs1` and
:py:meth:`rsa.PublicKey.load_pkcs1` to load keys from a file:
>>> import rsa
>>> with open('private.pem', mode='rb') as privatefile:
... keydata = privatefile.read()
>>> privkey = rsa.PrivateKey.load_pkcs1(keydata)
Time to generate a key
++++++++++++++++++++++
Generating a keypair may take a long time, depending on the number of
bits required. The number of bits determines the cryptographic
strength of the key, as well as the size of the message you can
encrypt. If you don't mind having a slightly smaller key than you
requested, you can pass ``accurate=False`` to speed up the key
generation process.
Another way to speed up the key generation process is to use multiple
processes in parallel to speed up the key generation. Use no more than
the number of processes that your machine can run in parallel; a
dual-core machine should use ``poolsize=2``; a quad-core
hyperthreading machine can run two threads on each core, and thus can
use ``poolsize=8``.
>>> (pubkey, privkey) = rsa.newkeys(512, poolsize=8)
These are some average timings from my desktop machine (Linux 2.6,
2.93 GHz quad-core Intel Core i7, 16 GB RAM) using 64-bit CPython 2.7.
Since key generation is a random process, times may differ even on
similar hardware. On all tests, we used the default ``accurate=True``.
+----------------+------------------+------------------+
| Keysize (bits) | single process | eight processes |
+================+==================+==================+
| 128 | 0.01 sec. | 0.01 sec. |
+----------------+------------------+------------------+
| 256 | 0.03 sec. | 0.02 sec. |
+----------------+------------------+------------------+
| 384 | 0.09 sec. | 0.04 sec. |
+----------------+------------------+------------------+
| 512 | 0.11 sec. | 0.07 sec. |
+----------------+------------------+------------------+
| 1024 | 0.79 sec. | 0.30 sec. |
+----------------+------------------+------------------+
| 2048 | 6.55 sec. | 1.60 sec. |
+----------------+------------------+------------------+
| 3072 | 23.4 sec. | 7.14 sec. |
+----------------+------------------+------------------+
| 4096 | 72.0 sec. | 24.4 sec. |
+----------------+------------------+------------------+
If key generation is too slow for you, you could use OpenSSL to
generate them for you, then load them in your Python code. OpenSSL
generates a 4096-bit key in 3.5 seconds on the same machine as used
above. See :ref:`openssl` for more information.
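A key generated by OpenSSL can then be loaded with Python-RSA (a sketch,
assuming OpenSSL wrote a PKCS#1-formatted PEM file)::

    import rsa

    with open('private.pem', 'rb') as keyfile:
        privkey = rsa.PrivateKey.load_pkcs1(keyfile.read())
    pubkey = rsa.PublicKey(privkey.n, privkey.e)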
Encryption and decryption
-------------------------
To encrypt or decrypt a message, use :py:func:`rsa.encrypt` and
:py:func:`rsa.decrypt` respectively. Let's say that Alice wants to send a message
that only Bob can read.
#. Bob generates a keypair, and gives the public key to Alice. This is
done such that Alice knows for sure that the key is really Bob's
(for example by handing over a USB stick that contains the key).
>>> import rsa
>>> (bob_pub, bob_priv) = rsa.newkeys(512)
#. Alice writes a message, and encodes it in UTF-8. The RSA module
only operates on bytes, and not on strings, so this step is
necessary.
>>> message = 'hello Bob!'.encode('utf8')
#. Alice encrypts the message using Bob's public key, and sends the
encrypted message.
>>> import rsa
>>> crypto = rsa.encrypt(message, bob_pub)
#. Bob receives the message, and decrypts it with his private key.
>>> message = rsa.decrypt(crypto, bob_priv)
>>> print(message.decode('utf8'))
hello Bob!
Since Bob kept his private key *private*, Alice can be sure that he is
the only one who can read the message. Bob does *not* know for sure
that it was Alice that sent the message, since she didn't sign it.
RSA can only encrypt messages that are smaller than the key. A couple
of bytes are lost on random padding, and the rest is available for the
message itself. For example, a 512-bit key can encode a 53-byte
message (512 bit = 64 bytes, 11 bytes are used for random padding and
other stuff). See :ref:`bigfiles` for information on how to work with
larger files.
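The maximum payload for a given key is easy to compute yourself (a small
sketch using a freshly generated keypair):

>>> import rsa
>>> from rsa import common
>>> (pub, priv) = rsa.newkeys(512)
>>> common.byte_size(pub.n) - 11  # 11 bytes of PKCS#1 v1.5 overhead
53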
Altering the encrypted information will *likely* cause a
:py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
:py:func:`rsa.sign`.
>>> crypto = rsa.encrypt(b'hello', bob_pub)
>>> crypto = crypto[:-1] + b'X' # change the last byte
>>> rsa.decrypt(crypto, bob_priv)
Traceback (most recent call last):
...
rsa.pkcs1.DecryptionError: Decryption failed
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.DecryptionError` exception. It shows where
in the code the exception occurred, and thus leaks information
about the key. It’s only a tiny bit of information, but every bit
makes cracking the keys easier.
Low-level operations
++++++++++++++++++++
The core RSA algorithm operates on large integers. These operations
are considered low-level and are supported by the
:py:func:`rsa.core.encrypt_int` and :py:func:`rsa.core.decrypt_int`
functions.
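These functions operate on bare integers and apply no padding, so they should
not be used directly for real messages; still, a minimal sketch with a freshly
generated keypair:

>>> import rsa
>>> from rsa import core
>>> (pub, priv) = rsa.newkeys(256)
>>> ciphertext = core.encrypt_int(42, pub.e, pub.n)
>>> core.decrypt_int(ciphertext, priv.d, priv.n)
42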
Signing and verification
------------------------
You can create a detached signature for a message using the
:py:func:`rsa.sign` function:
>>> (pubkey, privkey) = rsa.newkeys(512)
>>> message = 'Go left at the blue tree'.encode()
>>> signature = rsa.sign(message, privkey, 'SHA-1')
This hashes the message using SHA-1. Other hash methods are also
possible, check the :py:func:`rsa.sign` function documentation for
details. The hash is then signed with the private key.
It is possible to calculate the hash and signature in separate operations
(i.e for generating the hash on a client machine and then sign with a
private key on remote server). To hash a message use the :py:func:`rsa.compute_hash`
function and then use the :py:func:`rsa.sign_hash` function to sign the hash:
>>> message = 'Go left at the blue tree'.encode()
>>> hash = rsa.compute_hash(message, 'SHA-1')
>>> signature = rsa.sign_hash(hash, privkey, 'SHA-1')
In order to verify the signature, use the :py:func:`rsa.verify`
function. This function returns True if the verification is successful:
>>> message = 'Go left at the blue tree'.encode()
>>> rsa.verify(message, signature, pubkey)
True
Modify the message, and the signature is no longer valid and a
:py:class:`rsa.pkcs1.VerificationError` is thrown:
>>> message = 'Go right at the blue tree'.encode()
>>> rsa.verify(message, signature, pubkey)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/sybren/workspace/python-rsa/rsa/pkcs1.py", line 289, in verify
raise VerificationError('Verification failed')
rsa.pkcs1.VerificationError: Verification failed
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.VerificationError` exception. It shows where
in the code the exception occurred, and thus leaks information
about the key. It's only a tiny bit of information, but every bit
makes cracking the keys easier.
Instead of a message you can also call :py:func:`rsa.sign` and
:py:func:`rsa.verify` with a :py:class:`file`-like object. If the
message object has a ``read(int)`` method it is assumed to be a file.
In that case the file is hashed in 1024-byte blocks.
>>> with open('somefile', 'rb') as msgfile:
... signature = rsa.sign(msgfile, privkey, 'SHA-1')
>>> with open('somefile', 'rb') as msgfile:
... rsa.verify(msgfile, signature, pubkey)
.. _bigfiles:
Working with big files
----------------------
RSA can only encrypt messages that are smaller than the key. A couple
of bytes are lost on random padding, and the rest is available for the
message itself. For example, a 512-bit key can encode a 53-byte
message (512 bit = 64 bytes, 11 bytes are used for random padding and
other stuff).
How it usually works
++++++++++++++++++++
The most common way to use RSA with larger files uses a block cypher
like AES or DES3 to encrypt the file with a random key, then encrypt
the random key with RSA. You would send the encrypted file along with
the encrypted key to the recipient. The complete flow is:
#. Generate a random key
>>> import rsa.randnum
>>> aes_key = rsa.randnum.read_random_bits(128)
#. Use that key to encrypt the file with AES.
#. :py:func:`Encrypt <rsa.encrypt>` the AES key with RSA
>>> encrypted_aes_key = rsa.encrypt(aes_key, public_rsa_key)
#. Send the encrypted file together with ``encrypted_aes_key``
#. The recipient now reverses this process to obtain the encrypted
file.
.. note::
The Python-RSA module does not contain functionality to do the AES
encryption for you.
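Putting the pieces together with a third-party AES implementation (a sketch
only; it assumes the ``cryptography`` package, which is not part of
Python-RSA):

>>> import rsa
>>> from cryptography.fernet import Fernet
>>> (pub, priv) = rsa.newkeys(512)
>>> sym_key = Fernet.generate_key()            # random symmetric key
>>> token = Fernet(sym_key).encrypt(b'a message longer than RSA allows')
>>> encrypted_key = rsa.encrypt(sym_key, pub)  # protect the key with RSA
>>> Fernet(rsa.decrypt(encrypted_key, priv)).decrypt(token)
b'a message longer than RSA allows'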
Only using Python-RSA: the VARBLOCK format
++++++++++++++++++++++++++++++++++++++++++
.. warning::
The VARBLOCK format is NOT recommended for general use, has been deprecated since
Python-RSA 3.4, and has been removed in version 4.0. It's vulnerable to a
number of attacks:
1. decrypt/encrypt_bigfile() does not implement `Authenticated encryption`_ nor
uses MACs to verify messages before decrypting public key encrypted messages.
2. decrypt/encrypt_bigfile() does not use hybrid encryption (it uses plain RSA)
and has no method for chaining, so block reordering is possible.
See `issue #19 on GitHub`_ for more information.
.. _Authenticated encryption: https://en.wikipedia.org/wiki/Authenticated_encryption
.. _issue #19 on GitHub: https://github.com/sybrenstuvel/python-rsa/issues/19
As of Python-RSA version 4.0, the VARBLOCK format has been removed from the
library. For now, this section is kept here to document the issues with that
format, and ensure we don't do something like that again.
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/doc/usage.rst
"""Common functionality shared by several modules."""
from rsa._compat import zip
class NotRelativePrimeError(ValueError):
def __init__(self, a, b, d, msg=None):
super(NotRelativePrimeError, self).__init__(
msg or "%d and %d are not relatively prime, divider=%i" % (a, b, d))
self.a = a
self.b = b
self.d = d
def bit_size(num):
"""
Number of bits needed to represent an integer, excluding any leading
zero bits.
Usage::
>>> bit_size(1023)
10
>>> bit_size(1024)
11
>>> bit_size(1025)
11
:param num:
Integer value. If num is 0, returns 0. Only the absolute value of the
number is considered. Therefore, signed integers are treated as abs(num)
before the number's bit length is determined.
:returns:
Returns the number of bits in the integer.
"""
try:
return num.bit_length()
except AttributeError:
raise TypeError('bit_size(num) only supports integers, not %r' % type(num))
def byte_size(number):
"""
Returns the number of bytes required to hold a specific long number.
The number of bytes is rounded up.
Usage::
>>> byte_size(1 << 1023)
128
>>> byte_size((1 << 1024) - 1)
128
>>> byte_size(1 << 1024)
129
:param number:
An unsigned integer
:returns:
The number of bytes required to hold a specific long number.
"""
if number == 0:
return 1
return ceil_div(bit_size(number), 8)
def ceil_div(num, div):
"""
Returns the ceiling function of a division between `num` and `div`.
Usage::
>>> ceil_div(100, 7)
15
>>> ceil_div(100, 10)
10
>>> ceil_div(1, 4)
1
:param num: Division's numerator, a number
:param div: Division's divisor, a number
:return: Rounded up result of the division between the parameters.
"""
quanta, mod = divmod(num, div)
if mod:
quanta += 1
return quanta
def extended_gcd(a, b):
"""Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
"""
# r = gcd(a,b) i = multiplicative inverse of a mod b
# or j = multiplicative inverse of b mod a
# Neg return values for i or j are made positive mod b or a respectively
# Iterative version is faster and uses much less stack space
x = 0
y = 1
lx = 1
ly = 0
oa = a # Remember original a/b to remove
ob = b # negative values from return results
while b != 0:
q = a // b
(a, b) = (b, a % b)
(x, lx) = ((lx - (q * x)), x)
(y, ly) = ((ly - (q * y)), y)
if lx < 0:
lx += ob # If neg wrap modulo original b
if ly < 0:
ly += oa # If neg wrap modulo original a
return a, lx, ly # Return only positive values
def inverse(x, n):
"""Returns the inverse of x % n under multiplication, a.k.a x^-1 (mod n)
>>> inverse(7, 4)
3
>>> (inverse(143, 4) * 143) % 4
1
"""
(divider, inv, _) = extended_gcd(x, n)
if divider != 1:
raise NotRelativePrimeError(x, n, divider)
return inv
def crt(a_values, modulo_values):
"""Chinese Remainder Theorem.
Calculates x such that x = a[i] (mod m[i]) for each i.
:param a_values: the a-values of the above equation
:param modulo_values: the m-values of the above equation
:returns: x such that x = a[i] (mod m[i]) for each i
>>> crt([2, 3], [3, 5])
8
>>> crt([2, 3, 2], [3, 5, 7])
23
>>> crt([2, 3, 0], [7, 11, 15])
135
"""
m = 1
x = 0
for modulo in modulo_values:
m *= modulo
for (m_i, a_i) in zip(modulo_values, a_values):
M_i = m // m_i
inv = inverse(M_i, m_i)
x = (x + a_i * M_i * inv) % m
return x
if __name__ == '__main__':
import doctest
doctest.testmod()
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/common.py
from __future__ import with_statement, print_function
import sys
from optparse import OptionParser
import rsa.key
def private_to_public():
"""Reads a private key and outputs the corresponding public key."""
# Parse the CLI options
parser = OptionParser(usage='usage: %prog [options]',
description='Reads a private key and outputs the '
'corresponding public key. Both private and public keys use '
'the format described in PKCS#1 v1.5')
parser.add_option('-i', '--input', dest='infilename', type='string',
help='Input filename. Reads from stdin if not specified')
parser.add_option('-o', '--output', dest='outfilename', type='string',
help='Output filename. Writes to stdout if not specified')
parser.add_option('--inform', dest='inform',
help='key format of input - default PEM',
choices=('PEM', 'DER'), default='PEM')
parser.add_option('--outform', dest='outform',
help='key format of output - default PEM',
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv)
# Read the input data
if cli.infilename:
print('Reading private key from %s in %s format' %
(cli.infilename, cli.inform), file=sys.stderr)
with open(cli.infilename, 'rb') as infile:
in_data = infile.read()
else:
print('Reading private key from stdin in %s format' % cli.inform,
file=sys.stderr)
in_data = sys.stdin.read().encode('ascii')
assert type(in_data) == bytes, type(in_data)
# Take the public fields and create a public key
priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)
# Save to the output file
out_data = pub_key.save_pkcs1(cli.outform)
if cli.outfilename:
print('Writing public key to %s in %s format' %
(cli.outfilename, cli.outform), file=sys.stderr)
with open(cli.outfilename, 'wb') as outfile:
outfile.write(out_data)
else:
print('Writing public key to stdout in %s format' % cli.outform,
file=sys.stderr)
sys.stdout.write(out_data.decode('ascii'))
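# Invocation sketch: this function is typically wired up as a console script
# (historically named pyrsa-priv2pub); adjust the name to your installation.
#
#   $ pyrsa-priv2pub -i private.pem -o public.pem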
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/util.py
import base64
from rsa._compat import is_bytes, range
def _markers(pem_marker):
"""
Returns the start and end PEM markers, as bytes.
"""
if not is_bytes(pem_marker):
pem_marker = pem_marker.encode('ascii')
return (b'-----BEGIN ' + pem_marker + b'-----',
b'-----END ' + pem_marker + b'-----')
def load_pem(contents, pem_marker):
"""Loads a PEM file.
:param contents: the contents of the file to interpret
:param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
:return: the base64-decoded content between the start and end markers.
@raise ValueError: when the content is invalid, for example when the start
marker cannot be found.
"""
# We want bytes, not text. If it's text, it can be converted to ASCII bytes.
if not is_bytes(contents):
contents = contents.encode('ascii')
(pem_start, pem_end) = _markers(pem_marker)
pem_lines = []
in_pem_part = False
for line in contents.splitlines():
line = line.strip()
# Skip empty lines
if not line:
continue
# Handle start marker
if line == pem_start:
if in_pem_part:
raise ValueError('Seen start marker "%s" twice' % pem_start)
in_pem_part = True
continue
# Skip stuff before first marker
if not in_pem_part:
continue
# Handle end marker
if in_pem_part and line == pem_end:
in_pem_part = False
break
# Load fields
if b':' in line:
continue
pem_lines.append(line)
# Do some sanity checks
if not pem_lines:
raise ValueError('No PEM start marker "%s" found' % pem_start)
if in_pem_part:
raise ValueError('No PEM end marker "%s" found' % pem_end)
# Base64-decode the contents
pem = b''.join(pem_lines)
return base64.standard_b64decode(pem)
def save_pem(contents, pem_marker):
"""Saves a PEM file.
:param contents: the contents to encode in PEM format
:param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
:return: the base64-encoded content between the start and end markers, as bytes.
"""
(pem_start, pem_end) = _markers(pem_marker)
b64 = base64.standard_b64encode(contents).replace(b'\n', b'')
pem_lines = [pem_start]
for block_start in range(0, len(b64), 64):
block = b64[block_start:block_start + 64]
pem_lines.append(block)
pem_lines.append(pem_end)
pem_lines.append(b'')
return b'\n'.join(pem_lines)
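# Round-trip sketch:
#
#   der = load_pem(pem_text, 'RSA PRIVATE KEY')   # PEM -> raw DER bytes
#   pem_again = save_pem(der, 'RSA PRIVATE KEY')  # DER -> PEM bytes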
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/pem.py
from __future__ import absolute_import
import binascii
from struct import pack
from rsa._compat import byte, is_integer
from rsa import common, machine_size
def bytes2int(raw_bytes):
r"""Converts a list of bytes or an 8-bit string to an integer.
When using unicode strings, encode it to some encoding like UTF8 first.
>>> (((128 * 256) + 64) * 256) + 15
8405007
>>> bytes2int(b'\x80@\x0f')
8405007
"""
return int(binascii.hexlify(raw_bytes), 16)
def _int2bytes(number, block_size=None):
r"""Converts a number to a string of bytes.
Usage::
>>> _int2bytes(123456789)
b'\x07[\xcd\x15'
>>> bytes2int(_int2bytes(123456789))
123456789
>>> _int2bytes(123456789, 6)
b'\x00\x00\x07[\xcd\x15'
>>> bytes2int(_int2bytes(123456789, 128))
123456789
>>> _int2bytes(123456789, 3)
Traceback (most recent call last):
...
OverflowError: Needed 4 bytes for number, but block size is 3
@param number: the number to convert
@param block_size: the number of bytes to output. If the number encoded to
bytes is less than this, the block will be zero-padded. When not given,
the returned block is not padded.
@throws OverflowError when block_size is given and the number takes up more
bytes than fit into the block.
"""
# Type checking
if not is_integer(number):
raise TypeError("You must pass an integer for 'number', not %s" %
number.__class__)
if number < 0:
raise ValueError('Negative numbers cannot be used: %i' % number)
# Do some bounds checking
if number == 0:
needed_bytes = 1
raw_bytes = [b'\x00']
else:
needed_bytes = common.byte_size(number)
raw_bytes = []
# You cannot compare None > 0 in Python 3.x; it will fail with a TypeError.
if block_size and block_size > 0:
if needed_bytes > block_size:
raise OverflowError('Needed %i bytes for number, but block size '
'is %i' % (needed_bytes, block_size))
# Convert the number to bytes.
while number > 0:
raw_bytes.insert(0, byte(number & 0xFF))
number >>= 8
# Pad with zeroes to fill the block
if block_size and block_size > 0:
padding = (block_size - needed_bytes) * b'\x00'
else:
padding = b''
return padding + b''.join(raw_bytes)
def bytes_leading(raw_bytes, needle=b'\x00'):
"""
Finds the number of prefixed byte occurrences in the haystack.
Useful when you want to deal with padding.
:param raw_bytes:
Raw bytes.
:param needle:
The byte to count. Default \x00.
:returns:
The number of leading needle bytes.
"""
leading = 0
# Indexing keeps compatibility between Python 2.x and Python 3.x
_byte = needle[0]
for x in raw_bytes:
if x == _byte:
leading += 1
else:
break
return leading
def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
"""
Convert an unsigned integer to bytes (base-256 representation)::
Does not preserve leading zeros if you don't specify a chunk size or
fill size.
.. NOTE:
You must not specify both fill_size and chunk_size. Only one
of them is allowed.
:param number:
Integer value
:param fill_size:
If the optional fill size is given the length of the resulting
byte string is expected to be the fill size and will be padded
with prefix zero bytes to satisfy that length.
:param chunk_size:
If optional chunk size is given and greater than zero, pad the front of
the byte string with binary zeros so that the length is a multiple of
``chunk_size``.
:param overflow:
``False`` (default). If this is ``True``, no ``OverflowError``
will be raised when the fill_size is shorter than the length
of the generated byte sequence. Instead the byte sequence will
be returned as is.
:returns:
Raw bytes (base-256 representation).
:raises:
``OverflowError`` when fill_size is given and the number takes up more
bytes than fit into the block. This requires the ``overflow``
argument to this function to be set to ``False`` otherwise, no
error will be raised.
"""
if number < 0:
raise ValueError("Number must be an unsigned integer: %d" % number)
if fill_size and chunk_size:
raise ValueError("You can either fill or pad chunks, but not both")
# Ensure these are integers.
number & 1
raw_bytes = b''
# Pack the integer one machine word at a time into bytes.
num = number
word_bits, _, max_uint, pack_type = machine_size.get_word_alignment(num)
pack_format = ">%s" % pack_type
while num > 0:
raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
num >>= word_bits
# Obtain the index of the first non-zero byte.
zero_leading = bytes_leading(raw_bytes)
if number == 0:
raw_bytes = b'\x00'
# De-padding.
raw_bytes = raw_bytes[zero_leading:]
length = len(raw_bytes)
if fill_size and fill_size > 0:
if not overflow and length > fill_size:
raise OverflowError(
"Need %d bytes for number, but fill size is %d" %
(length, fill_size)
)
raw_bytes = raw_bytes.rjust(fill_size, b'\x00')
elif chunk_size and chunk_size > 0:
remainder = length % chunk_size
if remainder:
padding_size = chunk_size - remainder
raw_bytes = raw_bytes.rjust(length + padding_size, b'\x00')
return raw_bytes
if __name__ == '__main__':
import doctest
doctest.testmod()
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/transform.py
from rsa._compat import range
import rsa.common
import rsa.randnum
__all__ = ['getprime', 'are_relatively_prime']
def gcd(p, q):
"""Returns the greatest common divisor of p and q
>>> gcd(48, 180)
12
"""
while q != 0:
(p, q) = (q, p % q)
return p
def get_primality_testing_rounds(number):
"""Returns minimum number of rounds for Miller-Rabing primality testing,
based on number bitsize.
According to NIST FIPS 186-4, Appendix C, Table C.3, minimum number of
rounds of M-R testing, using an error probability of 2 ** (-100), for
different p, q bitsizes are:
* p, q bitsize: 512; rounds: 7
* p, q bitsize: 1024; rounds: 4
* p, q bitsize: 1536; rounds: 3
See: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
# Calculate number bitsize.
bitsize = rsa.common.bit_size(number)
# Set number of rounds.
if bitsize >= 1536:
return 3
if bitsize >= 1024:
return 4
if bitsize >= 512:
return 7
# For smaller bitsizes, set arbitrary number of rounds.
return 10
def miller_rabin_primality_testing(n, k):
"""Calculates whether n is composite (which is always correct) or prime
(which theoretically is incorrect with error probability 4**-k), by
applying Miller-Rabin primality testing.
For reference and implementation example, see:
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
:param n: Integer to be tested for primality.
:type n: int
:param k: Number of rounds (witnesses) of Miller-Rabin testing.
:type k: int
:return: False if the number is composite, True if it's probably prime.
:rtype: bool
"""
# prevent potential infinite loop when d = 0
if n < 2:
return False
# Decompose (n - 1) to write it as (2 ** r) * d
# While d is even, divide it by 2 and increase the exponent.
d = n - 1
r = 0
while not (d & 1):
r += 1
d >>= 1
# Test k witnesses.
for _ in range(k):
# Generate random integer a, where 2 <= a <= (n - 2)
a = rsa.randnum.randint(n - 3) + 1
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == 1:
# n is composite.
return False
if x == n - 1:
# Exit inner loop and continue with next witness.
break
else:
# If loop doesn't break, n is composite.
return False
return True
def is_prime(number):
"""Returns True if the number is prime, and False otherwise.
>>> is_prime(2)
True
>>> is_prime(42)
False
>>> is_prime(41)
True
"""
# Check for small numbers.
if number < 10:
return number in {2, 3, 5, 7}
# Check for even numbers.
if not (number & 1):
return False
# Calculate minimum number of rounds.
k = get_primality_testing_rounds(number)
# Run primality testing with (minimum + 1) rounds.
return miller_rabin_primality_testing(number, k + 1)
def getprime(nbits):
"""Returns a prime number that can be stored in 'nbits' bits.
>>> p = getprime(128)
>>> is_prime(p-1)
False
>>> is_prime(p)
True
>>> is_prime(p+1)
False
>>> from rsa import common
>>> common.bit_size(p) == 128
True
"""
assert nbits > 3 # the loop will hang on too-small numbers
while True:
integer = rsa.randnum.read_random_odd_int(nbits)
# Test for primeness
if is_prime(integer):
return integer
# Retry if not prime
def are_relatively_prime(a, b):
"""Returns True if a and b are relatively prime, and False if they
are not.
>>> are_relatively_prime(2, 3)
True
>>> are_relatively_prime(2, 4)
False
"""
d = gcd(a, b)
return d == 1
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count % 100 == 0 and count:
print('%i times' % count)
print('Doctests done')
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/prime.py
import hashlib
import os
from rsa._compat import range
from rsa import common, transform, core
# ASN.1 codes that describe the hash algorithm used.
HASH_ASN1 = {
'MD5': b'\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10',
'SHA-1': b'\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14',
'SHA-224': b'\x30\x2d\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04\x05\x00\x04\x1c',
'SHA-256': b'\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20',
'SHA-384': b'\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30',
'SHA-512': b'\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40',
}
HASH_METHODS = {
'MD5': hashlib.md5,
'SHA-1': hashlib.sha1,
'SHA-224': hashlib.sha224,
'SHA-256': hashlib.sha256,
'SHA-384': hashlib.sha384,
'SHA-512': hashlib.sha512,
}
class CryptoError(Exception):
"""Base class for all exceptions in this module."""
class DecryptionError(CryptoError):
"""Raised when decryption fails."""
class VerificationError(CryptoError):
"""Raised when verification fails."""
def _pad_for_encryption(message, target_length):
r"""Pads the message for encryption, returning the padded message.
:return: 00 02 RANDOM_DATA 00 MESSAGE
>>> block = _pad_for_encryption(b'hello', 16)
>>> len(block)
16
>>> block[0:2]
b'\x00\x02'
>>> block[-6:]
b'\x00hello'
"""
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
# Get random padding
padding = b''
padding_length = target_length - msglength - 3
    # We remove 0-bytes, so we may end up with less padding than requested;
    # keep adding data until we reach the correct length.
while len(padding) < padding_length:
needed_bytes = padding_length - len(padding)
        # Always read a few more bytes than we need, and trim off the rest
        # after removing the 0-bytes. This increases the chance of getting
        # enough bytes, especially when needed_bytes is small.
new_padding = os.urandom(needed_bytes + 5)
new_padding = new_padding.replace(b'\x00', b'')
padding = padding + new_padding[:needed_bytes]
assert len(padding) == padding_length
return b''.join([b'\x00\x02',
padding,
b'\x00',
message])
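# Editor's note (illustrative, not upstream text): the block built above is
# 00 02 || PS || 00 || M, where PS is random non-zero padding. With the
# 2 marker bytes, the 00 separator and at least 8 padding bytes, PKCS#1 v1.5
# requires target_length >= len(message) + 11, which is checked above.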
def _pad_for_signing(message, target_length):
r"""Pads the message for signing, returning the padded message.
The padding is always a repetition of FF bytes.
:return: 00 01 PADDING 00 MESSAGE
>>> block = _pad_for_signing(b'hello', 16)
>>> len(block)
16
>>> block[0:2]
b'\x00\x01'
>>> block[-6:]
b'\x00hello'
>>> block[2:-6]
b'\xff\xff\xff\xff\xff\xff\xff\xff'
"""
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
padding_length = target_length - msglength - 3
return b''.join([b'\x00\x01',
padding_length * b'\xff',
b'\x00',
message])
def encrypt(message, pub_key):
"""Encrypts the given message using PKCS#1 v1.5
:param message: the message to encrypt. Must be a byte string no longer than
``k-11`` bytes, where ``k`` is the number of bytes needed to encode
the ``n`` component of the public key.
:param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
:raise OverflowError: when the message is too large to fit in the padded
block.
>>> from rsa import key, common
>>> (pub_key, priv_key) = key.newkeys(256)
>>> message = b'hello'
>>> crypto = encrypt(message, pub_key)
The crypto text should be just as long as the public key 'n' component:
>>> len(crypto) == common.byte_size(pub_key.n)
True
"""
keylength = common.byte_size(pub_key.n)
padded = _pad_for_encryption(message, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def decrypt(crypto, priv_key):
r"""Decrypts the given message using PKCS#1 v1.5
The decryption is considered 'failed' when the resulting cleartext doesn't
start with the bytes 00 02, or when the 00 byte between the padding and
the message cannot be found.
:param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
:param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
:raise DecryptionError: when the decryption fails. No details are given as
to why the code thinks the decryption fails, as this would leak
information about the private key.
>>> import rsa
>>> (pub_key, priv_key) = rsa.newkeys(256)
It works with strings:
>>> crypto = encrypt(b'hello', pub_key)
>>> decrypt(crypto, priv_key)
b'hello'
And with binary data:
>>> crypto = encrypt(b'\x00\x00\x00\x00\x01', pub_key)
>>> decrypt(crypto, priv_key)
b'\x00\x00\x00\x00\x01'
Altering the encrypted information will *likely* cause a
:py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
:py:func:`rsa.sign`.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
code the exception occurred, and thus leaks information about the key.
It's only a tiny bit of information, but every bit makes cracking the
keys easier.
>>> crypto = encrypt(b'hello', pub_key)
>>> crypto = crypto[0:5] + b'X' + crypto[6:] # change a byte
>>> decrypt(crypto, priv_key)
Traceback (most recent call last):
...
rsa.pkcs1.DecryptionError: Decryption failed
"""
blocksize = common.byte_size(priv_key.n)
encrypted = transform.bytes2int(crypto)
decrypted = priv_key.blinded_decrypt(encrypted)
cleartext = transform.int2bytes(decrypted, blocksize)
# Detect leading zeroes in the crypto. These are not reflected in the
# encrypted value (as leading zeroes do not influence the value of an
# integer). This fixes CVE-2020-13757.
if len(crypto) > blocksize:
raise DecryptionError('Decryption failed')
# If we can't find the cleartext marker, decryption failed.
if cleartext[0:2] != b'\x00\x02':
raise DecryptionError('Decryption failed')
# Find the 00 separator between the padding and the message
try:
sep_idx = cleartext.index(b'\x00', 2)
except ValueError:
raise DecryptionError('Decryption failed')
return cleartext[sep_idx + 1:]
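# Editor's sketch (not part of upstream python-rsa): a minimal round trip
# through encrypt()/decrypt() above; the 512-bit key is illustrative only.
def _example_roundtrip():
    import rsa
    (pub_key, priv_key) = rsa.newkeys(512)
    crypto = encrypt(b'hello', pub_key)
    return decrypt(crypto, priv_key)  # b'hello'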
def sign_hash(hash_value, priv_key, hash_method):
"""Signs a precomputed hash with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param hash_value: A precomputed hash to sign (ignores message). Should be set to
None if needing to hash and sign message.
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
'SHA-224', SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
"""
# Get the ASN1 code for this hash method
if hash_method not in HASH_ASN1:
raise ValueError('Invalid hash method: %s' % hash_method)
asn1code = HASH_ASN1[hash_method]
# Encrypt the hash with the private key
cleartext = asn1code + hash_value
keylength = common.byte_size(priv_key.n)
padded = _pad_for_signing(cleartext, keylength)
payload = transform.bytes2int(padded)
encrypted = priv_key.blinded_encrypt(payload)
block = transform.int2bytes(encrypted, keylength)
return block
def sign(message, priv_key, hash_method):
"""Signs the message with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param message: the message to sign. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
        'SHA-224', 'SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
"""
msg_hash = compute_hash(message, hash_method)
return sign_hash(msg_hash, priv_key, hash_method)
def verify(message, signature, pub_key):
"""Verifies that the signature matches the message.
The hash method is detected automatically from the signature.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:raise VerificationError: when the signature doesn't match the message.
:returns: the name of the used hash.
"""
keylength = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, keylength)
# Get the hash method
method_name = _find_method_hash(clearsig)
message_hash = compute_hash(message, method_name)
# Reconstruct the expected padded hash
cleartext = HASH_ASN1[method_name] + message_hash
expected = _pad_for_signing(cleartext, keylength)
if len(signature) != keylength:
raise VerificationError('Verification failed')
# Compare with the signed one
if expected != clearsig:
raise VerificationError('Verification failed')
return method_name
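# Editor's sketch (not part of upstream python-rsa): a detached-signature
# round trip with sign()/verify(); key size and hash choice are illustrative.
def _example_sign_verify():
    import rsa
    (pub_key, priv_key) = rsa.newkeys(512)
    signature = sign(b'message', priv_key, 'SHA-256')
    return verify(b'message', signature, pub_key)  # 'SHA-256'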
def find_signature_hash(signature, pub_key):
"""Returns the hash name detected from the signature.
    If you also want to verify the message, use :py:func:`rsa.verify()`
    instead; like this function, it returns the name of the used hash.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:returns: the name of the used hash.
"""
keylength = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, keylength)
return _find_method_hash(clearsig)
def yield_fixedblocks(infile, blocksize):
"""Generator, yields each block of ``blocksize`` bytes in the input file.
:param infile: file to read and separate in blocks.
:param blocksize: block size in bytes.
:returns: a generator that yields the contents of each block
"""
while True:
block = infile.read(blocksize)
read_bytes = len(block)
if read_bytes == 0:
break
yield block
if read_bytes < blocksize:
break
def compute_hash(message, method_name):
"""Returns the message digest.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param method_name: the hash method, must be a key of
:py:const:`HASH_METHODS`.
"""
if method_name not in HASH_METHODS:
raise ValueError('Invalid hash method: %s' % method_name)
method = HASH_METHODS[method_name]
hasher = method()
if hasattr(message, 'read') and hasattr(message.read, '__call__'):
# read as 1K blocks
for block in yield_fixedblocks(message, 1024):
hasher.update(block)
else:
# hash the message object itself.
hasher.update(message)
return hasher.digest()
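# Editor's sketch (not part of upstream python-rsa): compute_hash() accepts
# either a byte string or a file-like object, so both calls below agree.
def _example_compute_hash():
    import io
    direct = compute_hash(b'payload', 'SHA-1')
    streamed = compute_hash(io.BytesIO(b'payload'), 'SHA-1')
    return direct == streamed  # True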
def _find_method_hash(clearsig):
"""Finds the hash method.
:param clearsig: full padded ASN1 and hash.
:return: the used hash method.
    :raise VerificationError: when the hash method cannot be found
"""
for (hashname, asn1code) in HASH_ASN1.items():
if asn1code in clearsig:
return hashname
raise VerificationError('Verification failed')
__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
'DecryptionError', 'VerificationError', 'CryptoError']
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count % 100 == 0 and count:
print('%i times' % count)
print('Doctests done')
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/pkcs1.py
| 0.662906 | 0.435721 |
pkcs1.py
|
pypi
|
from rsa._compat import range
from rsa import (
common,
pkcs1,
transform,
)
def mgf1(seed, length, hasher='SHA-1'):
"""
MGF1 is a Mask Generation Function based on a hash function.
A mask generation function takes an octet string of variable length and a
desired output length as input, and outputs an octet string of the desired
length. The plaintext-awareness of RSAES-OAEP relies on the random nature of
the output of the mask generation function, which in turn relies on the
random nature of the underlying hash.
:param bytes seed: seed from which mask is generated, an octet string
    :param int length: intended length in octets of the mask, at most 2**32 * hLen
:param str hasher: hash function (hLen denotes the length in octets of the hash
function output)
:return: mask, an octet string of length `length`
:rtype: bytes
:raise OverflowError: when `length` is too large for the specified `hasher`
:raise ValueError: when specified `hasher` is invalid
"""
try:
hash_length = pkcs1.HASH_METHODS[hasher]().digest_size
except KeyError:
raise ValueError(
'Invalid `hasher` specified. Please select one of: {hash_list}'.format(
hash_list=', '.join(sorted(pkcs1.HASH_METHODS.keys()))
)
)
    # If length > 2**32 * hLen, output "mask too long" and stop.
if length > (2**32 * hash_length):
raise OverflowError(
"Desired length should be at most 2**32 times the hasher's output "
"length ({hash_length} for {hasher} function)".format(
hash_length=hash_length,
hasher=hasher,
)
)
    # Looping `counter` from 0 through ceil(l / hLen), build `output` from the
    # hashes of (`seed` + C), where `C` is an octet string of length 4
    # generated by converting `counter` with the primitive I2OSP.
output = b''.join(
pkcs1.compute_hash(
seed + transform.int2bytes(counter, fill_size=4),
method_name=hasher,
)
for counter in range(common.ceil_div(length, hash_length) + 1)
)
# Output the leading `length` octets of `output` as the octet string mask.
return output[:length]
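# Editor's sketch (not part of upstream python-rsa): MGF1 is deterministic
# and truncation-consistent -- a longer mask from the same seed starts with
# the shorter mask.
def _example_mgf1_prefix():
    return mgf1(b'seed', 32)[:16] == mgf1(b'seed', 16)  # True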
__all__ = [
'mgf1',
]
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count % 100 == 0 and count:
print('%i times' % count)
print('Doctests done')
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/pkcs1_v2.py
| 0.782247 | 0.612889 |
pkcs1_v2.py
|
pypi
|
from __future__ import absolute_import
import itertools
import sys
from struct import pack
MAX_INT = sys.maxsize
MAX_INT64 = (1 << 63) - 1
MAX_INT32 = (1 << 31) - 1
MAX_INT16 = (1 << 15) - 1
PY2 = sys.version_info[0] == 2
# Determine the word size of the processor.
if MAX_INT == MAX_INT64:
# 64-bit processor.
MACHINE_WORD_SIZE = 64
elif MAX_INT == MAX_INT32:
# 32-bit processor.
MACHINE_WORD_SIZE = 32
else:
# Else we just assume 64-bit processor keeping up with modern times.
MACHINE_WORD_SIZE = 64
if PY2:
integer_types = (int, long)
range = xrange
zip = itertools.izip
else:
integer_types = (int, )
range = range
zip = zip
def write_to_stdout(data):
"""Writes bytes to stdout
:type data: bytes
"""
if PY2:
sys.stdout.write(data)
else:
# On Py3 we must use the buffer interface to write bytes.
sys.stdout.buffer.write(data)
def is_bytes(obj):
"""
Determines whether the given value is a byte string.
:param obj:
The value to test.
:returns:
``True`` if ``value`` is a byte string; ``False`` otherwise.
"""
return isinstance(obj, bytes)
def is_integer(obj):
"""
Determines whether the given value is an integer.
:param obj:
The value to test.
:returns:
``True`` if ``value`` is an integer; ``False`` otherwise.
"""
return isinstance(obj, integer_types)
def byte(num):
"""
Converts a number between 0 and 255 (both inclusive) to a base-256 (byte)
representation.
    Use it as a replacement for ``chr`` where you are expecting a byte,
    because this works on all current versions of Python.
:param num:
An unsigned integer between 0 and 255 (both inclusive).
:returns:
A single byte.
"""
return pack("B", num)
def xor_bytes(b1, b2):
"""
Returns the bitwise XOR result between two bytes objects, b1 ^ b2.
    Bitwise XOR is commutative, so the order of the parameters doesn't
    affect the result. If the parameters have different lengths, the extra
    bytes of the longer one are ignored.
:param b1:
First bytes object.
:param b2:
Second bytes object.
:returns:
Bytes object, result of XOR operation.
"""
if PY2:
return ''.join(byte(ord(x) ^ ord(y)) for x, y in zip(b1, b2))
return bytes(x ^ y for x, y in zip(b1, b2))
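# Editor's sketch (not part of upstream python-rsa): XOR is self-inverse,
# so masking twice with the same key bytes restores the input.
def _example_xor_roundtrip():
    data, key = b'\x01\x02\x03', b'\xaa\xbb\xcc'
    return xor_bytes(xor_bytes(data, key), key) == data  # True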
def get_word_alignment(num, force_arch=64,
_machine_word_size=MACHINE_WORD_SIZE):
"""
Returns alignment details for the given number based on the platform
Python is running on.
:param num:
Unsigned integral number.
:param force_arch:
If you don't want to use 64-bit unsigned chunks, set this to
anything other than 64. 32-bit chunks will be preferred then.
Default 64 will be used when on a 64-bit machine.
:param _machine_word_size:
(Internal) The machine word size used for alignment.
:returns:
4-tuple::
(word_bits, word_bytes,
max_uint, packing_format_type)
"""
max_uint64 = 0xffffffffffffffff
max_uint32 = 0xffffffff
max_uint16 = 0xffff
max_uint8 = 0xff
if force_arch == 64 and _machine_word_size >= 64 and num > max_uint32:
# 64-bit unsigned integer.
return 64, 8, max_uint64, "Q"
elif num > max_uint16:
# 32-bit unsigned integer
return 32, 4, max_uint32, "L"
elif num > max_uint8:
# 16-bit unsigned integer.
return 16, 2, max_uint16, "H"
else:
# 8-bit unsigned integer.
return 8, 1, max_uint8, "B"
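# Editor's note (illustrative, not upstream text): on a 64-bit build,
# get_word_alignment(2 ** 40) returns (64, 8, 0xffffffffffffffff, "Q"),
# while get_word_alignment(300) returns (16, 2, 0xffff, "H").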
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/rsa/rsa/_compat.py
| 0.582491 | 0.482795 |
_compat.py
|
pypi
|
import base64
import socket
import struct
import sys
if getattr(socket, "socket", None) is None:
raise ImportError("socket.socket missing, proxy support unusable")
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception):
pass
class GeneralProxyError(ProxyError):
pass
class Socks5AuthError(ProxyError):
pass
class Socks5Error(ProxyError):
pass
class Socks4Error(ProxyError):
pass
class HTTPError(ProxyError):
pass
_generalerrors = (
"success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input",
)
_socks5errors = (
"succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error",
)
_socks5autherrors = (
"succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error",
)
_socks4errors = (
"request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different "
"user-ids",
"unknown error",
)
def setdefaultproxy(
proxytype=None, addr=None, port=None, rdns=True, username=None, password=None
):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the
namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(
self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None
):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
self.__httptunnel = True
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count - len(data))
if not d:
raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def sendall(self, content, *args):
""" override socket.socket.sendall method to rewrite the header
for non-tunneling proxies if needed
"""
if not self.__httptunnel:
content = self.__rewriteproxy(content)
return super(socksocket, self).sendall(content, *args)
def __rewriteproxy(self, header):
""" rewrite HTTP request headers to support non-tunneling proxies
(i.e. those which do not support the CONNECT method).
This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
"""
host, endpt = None, None
hdrs = header.split("\r\n")
for hdr in hdrs:
if hdr.lower().startswith("host:"):
host = hdr
elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
endpt = hdr
if host and endpt:
hdrs.remove(host)
hdrs.remove(endpt)
host = host.split(" ")[1]
endpt = endpt.split(" ")
if self.__proxy[4] != None and self.__proxy[5] != None:
hdrs.insert(0, self.__getauthheader())
hdrs.insert(0, "Host: %s" % host)
hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
return "\r\n".join(hdrs)
def __getauthheader(self):
auth = self.__proxy[4] + ":" + self.__proxy[5]
return "Proxy-Authorization: Basic " + base64.b64encode(auth)
def setproxy(
self,
proxytype=None,
addr=None,
port=None,
rdns=True,
username=None,
password=None,
headers=None,
):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
headers - Additional or modified headers for the proxy connect
request.
"""
self.__proxy = (
proxytype,
addr,
port,
rdns,
username.encode() if username else None,
password.encode() if password else None,
headers,
)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4] != None) and (self.__proxy[5] != None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack("BBBB", 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack("BBB", 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(
chr(0x01).encode()
+ chr(len(self.__proxy[4]))
+ self.__proxy[4]
+ chr(len(self.__proxy[5]))
+ self.__proxy[5]
)
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
            if chosenauth[1:2] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack("BBB", 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = (
req
+ chr(0x03).encode()
+ chr(len(destaddr)).encode()
+ destaddr.encode()
)
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2]) <= 8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self, destaddr, destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (
socket.inet_ntoa(resp[4:]),
struct.unpack(">H", resp[2:4])[0],
)
        if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
wrote_host_header = False
wrote_auth_header = False
if self.__proxy[6] != None:
for key, val in self.__proxy[6].iteritems():
headers += [key, ": ", val, "\r\n"]
wrote_host_header = key.lower() == "host"
wrote_auth_header = key.lower() == "proxy-authorization"
if not wrote_host_header:
headers += ["Host: ", destaddr, "\r\n"]
if not wrote_auth_header:
if self.__proxy[4] != None and self.__proxy[5] != None:
headers += [self.__getauthheader(), "\r\n"]
headers.append("\r\n")
self.sendall("".join(headers).encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (
(not type(destpair) in (list, tuple))
or (len(destpair) < 2)
or (not isinstance(destpair[0], basestring))
or (type(destpair[1]) != int)
):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self, (self.__proxy[1], portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0], destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/httplib2/python2/httplib2/socks.py
| 0.421314 | 0.152631 |
socks.py
|
pypi
|
import base64
import socket
import struct
import sys
if getattr(socket, "socket", None) is None:
raise ImportError("socket.socket missing, proxy support unusable")
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception):
pass
class GeneralProxyError(ProxyError):
pass
class Socks5AuthError(ProxyError):
pass
class Socks5Error(ProxyError):
pass
class Socks4Error(ProxyError):
pass
class HTTPError(ProxyError):
pass
_generalerrors = (
"success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input",
)
_socks5errors = (
"succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error",
)
_socks5autherrors = (
"succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error",
)
_socks4errors = (
"request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different "
"user-ids",
"unknown error",
)
def setdefaultproxy(
proxytype=None, addr=None, port=None, rdns=True, username=None, password=None
):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the
namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
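# Editor's note (illustrative, not upstream text): the intended flow is to
# install a default proxy first and then patch the target module, e.g.
#   setdefaultproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
#   wrapmodule(some_module_that_imports_socket)
# after which sockets created inside that module are proxied.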
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(
self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None
):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
self.__httptunnel = True
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count - len(data))
if not d:
raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def sendall(self, content, *args):
""" override socket.socket.sendall method to rewrite the header
for non-tunneling proxies if needed
"""
if not self.__httptunnel:
content = self.__rewriteproxy(content)
return super(socksocket, self).sendall(content, *args)
def __rewriteproxy(self, header):
""" rewrite HTTP request headers to support non-tunneling proxies
(i.e. those which do not support the CONNECT method).
This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
"""
host, endpt = None, None
hdrs = header.split("\r\n")
for hdr in hdrs:
if hdr.lower().startswith("host:"):
host = hdr
elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
endpt = hdr
if host and endpt:
hdrs.remove(host)
hdrs.remove(endpt)
host = host.split(" ")[1]
endpt = endpt.split(" ")
if self.__proxy[4] != None and self.__proxy[5] != None:
hdrs.insert(0, self.__getauthheader())
hdrs.insert(0, "Host: %s" % host)
hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
return "\r\n".join(hdrs)
def __getauthheader(self):
auth = self.__proxy[4] + b":" + self.__proxy[5]
return "Proxy-Authorization: Basic " + base64.b64encode(auth).decode()
def setproxy(
self,
proxytype=None,
addr=None,
port=None,
rdns=True,
username=None,
password=None,
headers=None,
):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
headers - Additional or modified headers for the proxy connect
request.
"""
self.__proxy = (
proxytype,
addr,
port,
rdns,
username.encode() if username else None,
password.encode() if password else None,
headers,
)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4] != None) and (self.__proxy[5] != None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack("BBBB", 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack("BBB", 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
packet = bytearray()
packet.append(0x01)
packet.append(len(self.__proxy[4]))
packet.extend(self.__proxy[4])
packet.append(len(self.__proxy[5]))
packet.extend(self.__proxy[5])
self.sendall(packet)
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
            if chosenauth[1:2] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack("BBB", 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = (
req
+ chr(0x03).encode()
+ chr(len(destaddr)).encode()
+ destaddr.encode()
)
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2]) <= 8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self, destaddr, destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
            req = req + destaddr.encode() + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (
socket.inet_ntoa(resp[4:]),
struct.unpack(">H", resp[2:4])[0],
)
        if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
wrote_host_header = False
wrote_auth_header = False
if self.__proxy[6] != None:
            for key, val in self.__proxy[6].items():
headers += [key, ": ", val, "\r\n"]
wrote_host_header = key.lower() == "host"
wrote_auth_header = key.lower() == "proxy-authorization"
if not wrote_host_header:
headers += ["Host: ", destaddr, "\r\n"]
if not wrote_auth_header:
if self.__proxy[4] != None and self.__proxy[5] != None:
headers += [self.__getauthheader(), "\r\n"]
headers.append("\r\n")
self.sendall("".join(headers).encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (
(not type(destpair) in (list, tuple))
or (len(destpair) < 2)
or (not isinstance(destpair[0], (str, bytes)))
or (type(destpair[1]) != int)
):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self, (self.__proxy[1], portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0], destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/httplib2/python3/httplib2/socks.py
| 0.425367 | 0.151122 |
socks.py
|
pypi
|
from collections import deque
from apitools.base.py import gzip
__all__ = [
'CompressStream',
]
# pylint: disable=invalid-name
# Note: Apitools only uses the default chunksize when compressing.
def CompressStream(in_stream, length=None, compresslevel=2,
chunksize=16777216):
"""Compresses an input stream into a file-like buffer.
This reads from the input stream until either we've stored at least length
compressed bytes, or the input stream has been exhausted.
This supports streams of unknown size.
Args:
in_stream: The input stream to read from.
length: The target number of compressed bytes to buffer in the output
            stream. If length is None, the input stream will be compressed
            until it's exhausted.
            The actual length of the output buffer can vary from the target.
            If the input stream is exhausted, the output buffer may be smaller
            than expected. If the data is incompressible, the target length
            can be exceeded by an amount that can be calculated to be:
                chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17
            This accounts for the additional header data gzip adds. For the
            default 16MiB chunksize, this results in the max size of the
            output buffer being:
                length + 16MiB + 5142 bytes
compresslevel: Optional, defaults to 2. The desired compression level.
chunksize: Optional, defaults to 16MiB. The chunk size used when
reading data from the input stream to write into the output
buffer.
Returns:
A file-like output buffer of compressed bytes, the number of bytes read
from the input stream, and a flag denoting if the input stream was
exhausted.
"""
in_read = 0
in_exhausted = False
out_stream = StreamingBuffer()
with gzip.GzipFile(mode='wb',
fileobj=out_stream,
compresslevel=compresslevel) as compress_stream:
# Read until we've written at least length bytes to the output stream.
while not length or out_stream.length < length:
data = in_stream.read(chunksize)
data_length = len(data)
compress_stream.write(data)
in_read += data_length
# If we read less than requested, the stream is exhausted.
if data_length < chunksize:
in_exhausted = True
break
return out_stream, in_read, in_exhausted
class StreamingBuffer(object):
"""Provides a file-like object that writes to a temporary buffer.
When data is read from the buffer, it is permanently removed. This is
useful when there are memory constraints preventing the entire buffer from
being stored in memory.
"""
def __init__(self):
# The buffer of byte arrays.
self.__buf = deque()
# The number of bytes in __buf.
self.__size = 0
def __len__(self):
return self.__size
def __nonzero__(self):
# For 32-bit python2.x, len() cannot exceed a 32-bit number; avoid
# accidental len() calls from httplib in the form of "if this_object:".
return bool(self.__size)
@property
def length(self):
# For 32-bit python2.x, len() cannot exceed a 32-bit number.
return self.__size
def write(self, data):
# Gzip can write many 0 byte chunks for highly compressible data.
# Prevent them from being added internally.
if data is not None and data:
self.__buf.append(data)
self.__size += len(data)
def read(self, size=None):
"""Read at most size bytes from this buffer.
Bytes read from this buffer are consumed and are permanently removed.
Args:
size: If provided, read no more than size bytes from the buffer.
Otherwise, this reads the entire buffer.
Returns:
The bytes read from this buffer.
"""
if size is None:
size = self.__size
ret_list = []
while size > 0 and self.__buf:
data = self.__buf.popleft()
size -= len(data)
ret_list.append(data)
if size < 0:
ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:]
self.__buf.appendleft(remainder)
ret = b''.join(ret_list)
self.__size -= len(ret)
return ret
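# Editor's sketch (not part of upstream apitools): exercising CompressStream
# with an in-memory source; the sizes are arbitrary illustrative values.
def _example_compress():
    import io
    source = io.BytesIO(b'x' * 1024)
    buf, bytes_read, exhausted = CompressStream(source)
    # The whole (highly compressible) input was consumed in one pass.
    return bytes_read == 1024 and exhausted and len(buf) > 0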
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/apitools/base/py/compression.py
| 0.903275 | 0.618464 |
compression.py
|
pypi
|
import os
import random
import six
from six.moves import http_client
import six.moves.urllib.error as urllib_error
import six.moves.urllib.parse as urllib_parse
import six.moves.urllib.request as urllib_request
from apitools.base.protorpclite import messages
from apitools.base.py import encoding_helper as encoding
from apitools.base.py import exceptions
if six.PY3:
from collections.abc import Iterable
else:
from collections import Iterable
__all__ = [
'DetectGae',
'DetectGce',
]
_RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;="
def DetectGae():
"""Determine whether or not we're running on GAE.
This is based on:
https://developers.google.com/appengine/docs/python/#The_Environment
Returns:
True iff we're running on GAE.
"""
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or
server_software.startswith('Google App Engine/'))
def DetectGce():
"""Determine whether or not we're running on GCE.
This is based on:
https://cloud.google.com/compute/docs/metadata#runninggce
Returns:
True iff we're running on a GCE instance.
"""
metadata_url = 'http://{}'.format(
os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal'))
try:
o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open(
urllib_request.Request(
metadata_url, headers={'Metadata-Flavor': 'Google'}))
except urllib_error.URLError:
return False
return (o.getcode() == http_client.OK and
o.headers.get('metadata-flavor') == 'Google')
def NormalizeScopes(scope_spec):
"""Normalize scope_spec to a set of strings."""
if isinstance(scope_spec, six.string_types):
scope_spec = six.ensure_str(scope_spec)
return set(scope_spec.split(' '))
elif isinstance(scope_spec, Iterable):
scope_spec = [six.ensure_str(x) for x in scope_spec]
return set(scope_spec)
raise exceptions.TypecheckError(
'NormalizeScopes expected string or iterable, found %s' % (
type(scope_spec),))
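# Editor's note (illustrative, not upstream text): NormalizeScopes('a b')
# and NormalizeScopes(['a', 'b']) both return the set {'a', 'b'}.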
def Typecheck(arg, arg_type, msg=None):
if not isinstance(arg, arg_type):
if msg is None:
if isinstance(arg_type, tuple):
msg = 'Type of arg is "%s", not one of %r' % (
type(arg), arg_type)
else:
msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type)
raise exceptions.TypecheckError(msg)
return arg
def ExpandRelativePath(method_config, params, relative_path=None):
"""Determine the relative path for request."""
path = relative_path or method_config.relative_path or ''
for param in method_config.path_params:
param_template = '{%s}' % param
# For more details about "reserved word expansion", see:
# http://tools.ietf.org/html/rfc6570#section-3.2.2
reserved_chars = ''
reserved_template = '{+%s}' % param
if reserved_template in path:
reserved_chars = _RESERVED_URI_CHARS
path = path.replace(reserved_template, param_template)
if param_template not in path:
raise exceptions.InvalidUserInputError(
'Missing path parameter %s' % param)
try:
# TODO(craigcitro): Do we want to support some sophisticated
# mapping here?
value = params[param]
except KeyError:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
if value is None:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
try:
if not isinstance(value, six.string_types):
value = str(value)
path = path.replace(param_template,
urllib_parse.quote(value.encode('utf_8'),
reserved_chars))
except TypeError as e:
raise exceptions.InvalidUserInputError(
'Error setting required parameter %s to value %s: %s' % (
param, value, e))
return path
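# Editor's note (illustrative, not upstream text): with relative_path
# 'b/{bucket}/o/{object}' and params {'bucket': 'logs', 'object': 'a/b'},
# this yields 'b/logs/o/a%2Fb'; a '{+object}' template would instead keep
# the slash, per RFC 6570 reserved expansion.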
def CalculateWaitForRetry(retry_attempt, max_wait=60):
"""Calculates amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts. A
random amount of jitter is added to spread out retry attempts from
different clients.
Args:
retry_attempt: Retry attempt counter.
max_wait: Upper bound for wait time [seconds].
Returns:
Number of seconds to wait before retrying request.
"""
wait_time = 2 ** retry_attempt
max_jitter = wait_time / 4.0
wait_time += random.uniform(-max_jitter, max_jitter)
return max(1, min(wait_time, max_wait))
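# Editor's note (illustrative, not upstream text): the wait doubles per
# attempt with +/- 25% jitter and is clamped to [1, max_wait]; e.g.
# retry_attempt=3 yields roughly 6-10 seconds.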
def AcceptableMimeType(accept_patterns, mime_type):
"""Return True iff mime_type is acceptable for one of accept_patterns.
Note that this function assumes that all patterns in accept_patterns
will be simple types of the form "type/subtype", where one or both
of these can be "*". We do not support parameters (i.e. "; q=") in
patterns.
Args:
accept_patterns: list of acceptable MIME types.
mime_type: the mime type we would like to match.
Returns:
Whether or not mime_type matches (at least) one of these patterns.
"""
if '/' not in mime_type:
raise exceptions.InvalidUserInputError(
'Invalid MIME type: "%s"' % mime_type)
unsupported_patterns = [p for p in accept_patterns if ';' in p]
if unsupported_patterns:
raise exceptions.GeneratedClientError(
'MIME patterns with parameter unsupported: "%s"' % ', '.join(
unsupported_patterns))
def MimeTypeMatches(pattern, mime_type):
"""Return True iff mime_type is acceptable for pattern."""
# Some systems use a single '*' instead of '*/*'.
if pattern == '*':
pattern = '*/*'
return all(accept in ('*', provided) for accept, provided
in zip(pattern.split('/'), mime_type.split('/')))
return any(MimeTypeMatches(pattern, mime_type)
for pattern in accept_patterns)
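# Editor's note (illustrative, not upstream text):
#   AcceptableMimeType(['image/*'], 'image/png') -> True
#   AcceptableMimeType(['text/*'], 'image/png') -> False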
def MapParamNames(params, request_type):
"""Reverse parameter remappings for URL construction."""
return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p
for p in params]
def MapRequestParams(params, request_type):
"""Perform any renames/remappings needed for URL construction.
Currently, we have several ways to customize JSON encoding, in
particular of field names and enums. This works fine for JSON
bodies, but also needs to be applied for path and query parameters
in the URL.
This function takes a dictionary from param names to values, and
performs any registered mappings. We also need the request type (to
look up the mappings).
Args:
params: (dict) Map from param names to values
request_type: (protorpc.messages.Message) request type for this API call
Returns:
A new dict of the same size, with all registered mappings applied.
"""
new_params = dict(params)
for param_name, value in params.items():
field_remapping = encoding.GetCustomJsonFieldMapping(
request_type, python_name=param_name)
if field_remapping is not None:
new_params[field_remapping] = new_params.pop(param_name)
param_name = field_remapping
if isinstance(value, messages.Enum):
new_params[param_name] = encoding.GetCustomJsonEnumMapping(
type(value), python_name=str(value)) or str(value)
return new_params
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/apitools/base/py/util.py
| 0.660172 | 0.153074 |
util.py
|
pypi
|
import base64
import collections
import datetime
import json
import six
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import protojson
from apitools.base.py import exceptions
_Codec = collections.namedtuple('_Codec', ['encoder', 'decoder'])
CodecResult = collections.namedtuple('CodecResult', ['value', 'complete'])
class EdgeType(object):
"""The type of transition made by an edge."""
SCALAR = 1
REPEATED = 2
MAP = 3
class ProtoEdge(collections.namedtuple('ProtoEdge',
['type_', 'field', 'index'])):
"""A description of a one-level transition from a message to a value.
Protobuf messages can be arbitrarily nested as fields can be defined with
any "message" type. This nesting property means that there are often many
levels of proto messages within a single message instance. This class can
unambiguously describe a single step from a message to some nested value.
Properties:
type_: EdgeType, The type of transition represented by this edge.
field: str, The name of the message-typed field.
index: Any, Additional data needed to make the transition. The semantics
of the "index" property change based on the value of "type_":
SCALAR: ignored.
REPEATED: a numeric index into "field"'s list.
MAP: a key into "field"'s mapping.
"""
__slots__ = ()
def __str__(self):
if self.type_ == EdgeType.SCALAR:
return self.field
else:
return '{}[{}]'.format(self.field, self.index)
# TODO(craigcitro): Make these non-global.
_UNRECOGNIZED_FIELD_MAPPINGS = {}
_CUSTOM_MESSAGE_CODECS = {}
_CUSTOM_FIELD_CODECS = {}
_FIELD_TYPE_CODECS = {}
def MapUnrecognizedFields(field_name):
"""Register field_name as a container for unrecognized fields."""
def Register(cls):
_UNRECOGNIZED_FIELD_MAPPINGS[cls] = field_name
return cls
return Register
def RegisterCustomMessageCodec(encoder, decoder):
"""Register a custom encoder/decoder for this message class."""
def Register(cls):
_CUSTOM_MESSAGE_CODECS[cls] = _Codec(encoder=encoder, decoder=decoder)
return cls
return Register
def RegisterCustomFieldCodec(encoder, decoder):
"""Register a custom encoder/decoder for this field."""
def Register(field):
_CUSTOM_FIELD_CODECS[field] = _Codec(encoder=encoder, decoder=decoder)
return field
return Register
def RegisterFieldTypeCodec(encoder, decoder):
"""Register a custom encoder/decoder for all fields of this type."""
def Register(field_type):
_FIELD_TYPE_CODECS[field_type] = _Codec(
encoder=encoder, decoder=decoder)
return field_type
return Register
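# Illustrative sketch (editor's addition): the registration decorators above
# in use. '_RawString' is hypothetical; its encoder must map a message
# instance to a JSON string, and its decoder must invert that.
@RegisterCustomMessageCodec(
    encoder=lambda msg: json.dumps(msg.value),
    decoder=lambda data: _RawString(value=json.loads(data)))
class _RawString(messages.Message):
    value = messages.StringField(1)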
def CopyProtoMessage(message):
"""Make a deep copy of a message."""
return JsonToMessage(type(message), MessageToJson(message))
def MessageToJson(message, include_fields=None):
"""Convert the given message to JSON."""
result = _ProtoJsonApiTools.Get().encode_message(message)
return _IncludeFields(result, message, include_fields)
def JsonToMessage(message_type, message):
"""Convert the given JSON to a message of type message_type."""
return _ProtoJsonApiTools.Get().decode_message(message_type, message)
# TODO(craigcitro): Do this directly, instead of via JSON.
def DictToMessage(d, message_type):
"""Convert the given dictionary to a message of type message_type."""
return JsonToMessage(message_type, json.dumps(d))
def MessageToDict(message):
"""Convert the given message to a dictionary."""
return json.loads(MessageToJson(message))
def DictToAdditionalPropertyMessage(properties, additional_property_type,
sort_items=False):
"""Convert the given dictionary to an AdditionalProperty message."""
items = properties.items()
if sort_items:
items = sorted(items)
map_ = []
for key, value in items:
map_.append(additional_property_type.AdditionalProperty(
key=key, value=value))
return additional_property_type(additionalProperties=map_)
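# Illustrative sketch (editor's addition): the message shape the helper above
# expects. '_LabelsValue' is hypothetical; generated map-like types follow
# this additionalProperties pattern.
class _LabelsValue(messages.Message):
    class AdditionalProperty(messages.Message):
        key = messages.StringField(1)
        value = messages.StringField(2)

    additionalProperties = messages.MessageField(
        'AdditionalProperty', 1, repeated=True)

# DictToAdditionalPropertyMessage({'env': 'prod'}, _LabelsValue) returns a
# _LabelsValue whose additionalProperties holds AdditionalProperty(key='env',
# value='prod').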
def PyValueToMessage(message_type, value):
"""Convert the given python value to a message of type message_type."""
return JsonToMessage(message_type, json.dumps(value))
def MessageToPyValue(message):
"""Convert the given message to a python value."""
return json.loads(MessageToJson(message))
def MessageToRepr(msg, multiline=False, **kwargs):
"""Return a repr-style string for a protorpc message.
protorpc.Message.__repr__ does not return anything that could be considered
python code. Adding this function lets us print a protorpc message in such
a way that it could be pasted into code later, and used to compare against
other things.
Args:
msg: protorpc.Message, the message to be repr'd.
multiline: bool, True if the returned string should have each field
assignment on its own line.
**kwargs: {str:str}, Additional flags for how to format the string.
Known **kwargs:
shortstrings: bool, True if all string values should be
truncated at 100 characters, since when mocking the contents
typically don't matter except for IDs, and IDs are usually
less than 100 characters.
no_modules: bool, True if the long module name should not be printed with
each type.
Returns:
str, A string of valid python (assuming the right imports have been made)
that recreates the message passed into this function.
"""
# TODO(jasmuth): craigcitro suggests a pretty-printer from apitools/gen.
indent = kwargs.get('indent', 0)
def IndentKwargs(kwargs):
kwargs = dict(kwargs)
kwargs['indent'] = kwargs.get('indent', 0) + 4
return kwargs
if isinstance(msg, list):
s = '['
for item in msg:
if multiline:
s += '\n' + ' ' * (indent + 4)
s += MessageToRepr(
item, multiline=multiline, **IndentKwargs(kwargs)) + ','
if multiline:
s += '\n' + ' ' * indent
s += ']'
return s
if isinstance(msg, messages.Message):
s = type(msg).__name__ + '('
if not kwargs.get('no_modules'):
s = msg.__module__ + '.' + s
names = sorted([field.name for field in msg.all_fields()])
for name in names:
field = msg.field_by_name(name)
if multiline:
s += '\n' + ' ' * (indent + 4)
value = getattr(msg, field.name)
s += field.name + '=' + MessageToRepr(
value, multiline=multiline, **IndentKwargs(kwargs)) + ','
if multiline:
s += '\n' + ' ' * indent
s += ')'
return s
if isinstance(msg, six.string_types):
if kwargs.get('shortstrings') and len(msg) > 100:
msg = msg[:100]
if isinstance(msg, datetime.datetime):
class SpecialTZInfo(datetime.tzinfo):
def __init__(self, offset):
super(SpecialTZInfo, self).__init__()
self.offset = offset
def __repr__(self):
s = 'TimeZoneOffset(' + repr(self.offset) + ')'
if not kwargs.get('no_modules'):
s = 'apitools.base.protorpclite.util.' + s
return s
msg = datetime.datetime(
msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second,
msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0)))
return repr(msg)
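# Worked example (editor's addition): for the hypothetical _RawString message
# sketched earlier in this file, the output is pastable python:
#
#     MessageToRepr(_RawString(value='x'), no_modules=True)
#     # -> "_RawString(value='x',)"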
def _GetField(message, field_path):
for field in field_path:
if field not in dir(message):
raise KeyError('no field "%s"' % field)
message = getattr(message, field)
return message
def _SetField(dictblob, field_path, value):
for field in field_path[:-1]:
dictblob = dictblob.setdefault(field, {})
dictblob[field_path[-1]] = value
def _IncludeFields(encoded_message, message, include_fields):
"""Add the requested fields to the encoded message."""
if include_fields is None:
return encoded_message
result = json.loads(encoded_message)
for field_name in include_fields:
try:
value = _GetField(message, field_name.split('.'))
nullvalue = None
if isinstance(value, list):
nullvalue = []
except KeyError:
raise exceptions.InvalidDataError(
'No field named %s in message of type %s' % (
field_name, type(message)))
_SetField(result, field_name.split('.'), nullvalue)
return json.dumps(result)
def _GetFieldCodecs(field, attr):
result = [
getattr(_CUSTOM_FIELD_CODECS.get(field), attr, None),
getattr(_FIELD_TYPE_CODECS.get(type(field)), attr, None),
]
return [x for x in result if x is not None]
class _ProtoJsonApiTools(protojson.ProtoJson):
"""JSON encoder used by apitools clients."""
_INSTANCE = None
@classmethod
def Get(cls):
if cls._INSTANCE is None:
cls._INSTANCE = cls()
return cls._INSTANCE
def decode_message(self, message_type, encoded_message):
if message_type in _CUSTOM_MESSAGE_CODECS:
return _CUSTOM_MESSAGE_CODECS[
message_type].decoder(encoded_message)
result = _DecodeCustomFieldNames(message_type, encoded_message)
result = super(_ProtoJsonApiTools, self).decode_message(
message_type, result)
result = _ProcessUnknownEnums(result, encoded_message)
result = _ProcessUnknownMessages(result, encoded_message)
return _DecodeUnknownFields(result, encoded_message)
def decode_field(self, field, value):
"""Decode the given JSON value.
Args:
field: a messages.Field for the field we're decoding.
value: a python value we'd like to decode.
Returns:
A value suitable for assignment to field.
"""
for decoder in _GetFieldCodecs(field, 'decoder'):
result = decoder(field, value)
value = result.value
if result.complete:
return value
if isinstance(field, messages.MessageField):
field_value = self.decode_message(
field.message_type, json.dumps(value))
elif isinstance(field, messages.EnumField):
value = GetCustomJsonEnumMapping(
field.type, json_name=value) or value
try:
field_value = super(
_ProtoJsonApiTools, self).decode_field(field, value)
except messages.DecodeError:
if not isinstance(value, six.string_types):
raise
field_value = None
else:
field_value = super(
_ProtoJsonApiTools, self).decode_field(field, value)
return field_value
def encode_message(self, message):
if isinstance(message, messages.FieldList):
return '[%s]' % (', '.join(self.encode_message(x)
for x in message))
# pylint: disable=unidiomatic-typecheck
if type(message) in _CUSTOM_MESSAGE_CODECS:
return _CUSTOM_MESSAGE_CODECS[type(message)].encoder(message)
message = _EncodeUnknownFields(message)
result = super(_ProtoJsonApiTools, self).encode_message(message)
result = _EncodeCustomFieldNames(message, result)
return json.dumps(json.loads(result), sort_keys=True)
def encode_field(self, field, value):
"""Encode the given value as JSON.
Args:
field: a messages.Field for the field we're encoding.
value: a value for field.
Returns:
A python value suitable for json.dumps.
"""
for encoder in _GetFieldCodecs(field, 'encoder'):
result = encoder(field, value)
value = result.value
if result.complete:
return value
if isinstance(field, messages.EnumField):
if field.repeated:
remapped_value = [GetCustomJsonEnumMapping(
field.type, python_name=e.name) or e.name for e in value]
else:
remapped_value = GetCustomJsonEnumMapping(
field.type, python_name=value.name)
if remapped_value:
return remapped_value
if (isinstance(field, messages.MessageField) and
not isinstance(field, message_types.DateTimeField)):
value = json.loads(self.encode_message(value))
return super(_ProtoJsonApiTools, self).encode_field(field, value)
# TODO(craigcitro): Fold this and _IncludeFields in as codecs.
def _DecodeUnknownFields(message, encoded_message):
"""Rewrite unknown fields in message into message.destination."""
destination = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if destination is None:
return message
pair_field = message.field_by_name(destination)
if not isinstance(pair_field, messages.MessageField):
raise exceptions.InvalidDataFromServerError(
'Unrecognized fields must be mapped to a compound '
'message type.')
pair_type = pair_field.message_type
# TODO(craigcitro): Add more error checking around the pair
# type being exactly what we suspect (field names, etc).
if isinstance(pair_type.value, messages.MessageField):
new_values = _DecodeUnknownMessages(
message, json.loads(encoded_message), pair_type)
else:
new_values = _DecodeUnrecognizedFields(message, pair_type)
setattr(message, destination, new_values)
# We could probably get away with not setting this, but
# why not clear it?
setattr(message, '_Message__unrecognized_fields', {})
return message
def _DecodeUnknownMessages(message, encoded_message, pair_type):
"""Process unknown fields in encoded_message of a message type."""
field_type = pair_type.value.type
new_values = []
all_field_names = [x.name for x in message.all_fields()]
for name, value_dict in six.iteritems(encoded_message):
if name in all_field_names:
continue
value = PyValueToMessage(field_type, value_dict)
if pair_type.value.repeated:
value = _AsMessageList(value)
new_pair = pair_type(key=name, value=value)
new_values.append(new_pair)
return new_values
def _DecodeUnrecognizedFields(message, pair_type):
"""Process unrecognized fields in message."""
new_values = []
codec = _ProtoJsonApiTools.Get()
for unknown_field in message.all_unrecognized_fields():
# TODO(craigcitro): Consider validating the variant if
# the assignment below doesn't take care of it. It may
# also be necessary to check it in the case that the
# type has multiple encodings.
value, _ = message.get_unrecognized_field_info(unknown_field)
value_type = pair_type.field_by_name('value')
if isinstance(value_type, messages.MessageField):
decoded_value = DictToMessage(value, pair_type.value.message_type)
else:
decoded_value = codec.decode_field(
pair_type.value, value)
try:
new_pair_key = str(unknown_field)
except UnicodeEncodeError:
new_pair_key = protojson.ProtoJson().decode_field(
pair_type.key, unknown_field)
new_pair = pair_type(key=new_pair_key, value=decoded_value)
new_values.append(new_pair)
return new_values
def _CopyProtoMessageVanillaProtoJson(message):
codec = protojson.ProtoJson()
return codec.decode_message(type(message), codec.encode_message(message))
def _EncodeUnknownFields(message):
"""Remap unknown fields in message out of message.source."""
source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if source is None:
return message
# CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use
# the vanilla protojson-based copy function to avoid infinite recursion.
result = _CopyProtoMessageVanillaProtoJson(message)
pairs_field = message.field_by_name(source)
if not isinstance(pairs_field, messages.MessageField):
raise exceptions.InvalidUserInputError(
'Invalid pairs field %s' % pairs_field)
pairs_type = pairs_field.message_type
value_field = pairs_type.field_by_name('value')
value_variant = value_field.variant
pairs = getattr(message, source)
codec = _ProtoJsonApiTools.Get()
for pair in pairs:
encoded_value = codec.encode_field(value_field, pair.value)
result.set_unrecognized_field(pair.key, encoded_value, value_variant)
setattr(result, source, [])
return result
def _SafeEncodeBytes(field, value):
"""Encode the bytes in value as urlsafe base64."""
try:
if field.repeated:
result = [base64.urlsafe_b64encode(byte) for byte in value]
else:
result = base64.urlsafe_b64encode(value)
complete = True
except TypeError:
result = value
complete = False
return CodecResult(value=result, complete=complete)
def _SafeDecodeBytes(unused_field, value):
"""Decode the urlsafe base64 value into bytes."""
try:
result = base64.urlsafe_b64decode(str(value))
complete = True
except TypeError:
result = value
complete = False
return CodecResult(value=result, complete=complete)
def _ProcessUnknownEnums(message, encoded_message):
"""Add unknown enum values from encoded_message as unknown fields.
ProtoRPC diverges from the usual protocol buffer behavior here and
doesn't allow unknown fields. Throwing on unknown fields makes it
impossible to let servers add new enum values and stay compatible
with older clients, which isn't reasonable for us. We simply store
unrecognized enum values as unknown fields, and all is well.
Args:
message: Proto message we've decoded thus far.
encoded_message: JSON string we're decoding.
Returns:
message, with any unknown enums stored as unrecognized fields.
"""
if not encoded_message:
return message
decoded_message = json.loads(six.ensure_str(encoded_message))
for field in message.all_fields():
if (isinstance(field, messages.EnumField) and
field.name in decoded_message and
message.get_assigned_value(field.name) is None):
message.set_unrecognized_field(
field.name, decoded_message[field.name], messages.Variant.ENUM)
return message
def _ProcessUnknownMessages(message, encoded_message):
"""Store any remaining unknown fields as strings.
ProtoRPC currently ignores unknown values for which no type can be
determined (and logs a "No variant found" message). For the purposes
of reserializing, this is quite harmful (since it throws away
information). Here we simply add those as unknown fields of type
string (so that they can easily be reserialized).
Args:
message: Proto message we've decoded thus far.
encoded_message: JSON string we're decoding.
Returns:
message, with any remaining unrecognized fields saved.
"""
if not encoded_message:
return message
decoded_message = json.loads(six.ensure_str(encoded_message))
message_fields = [x.name for x in message.all_fields()] + list(
message.all_unrecognized_fields())
missing_fields = [x for x in decoded_message.keys()
if x not in message_fields]
for field_name in missing_fields:
message.set_unrecognized_field(field_name, decoded_message[field_name],
messages.Variant.STRING)
return message
RegisterFieldTypeCodec(_SafeEncodeBytes, _SafeDecodeBytes)(messages.BytesField)
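# With the registration above, every BytesField round-trips through urlsafe
# base64 on the wire. Illustrative sketch (editor's addition) with a
# hypothetical message:
#
#     class _Blob(messages.Message):
#         data = messages.BytesField(1)
#
#     MessageToJson(_Blob(data=b'\x00\xff'))  # 'data' encodes as "AP8="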
# Note that these could share a dictionary, since they're keyed by
# distinct types, but it's not really worth it.
_JSON_ENUM_MAPPINGS = {}
_JSON_FIELD_MAPPINGS = {}
def AddCustomJsonEnumMapping(enum_type, python_name, json_name,
package=None): # pylint: disable=unused-argument
"""Add a custom wire encoding for a given enum value.
This is primarily used in generated code, to handle enum values
which happen to be Python keywords.
Args:
enum_type: (messages.Enum) An enum type
python_name: (basestring) Python name for this value.
json_name: (basestring) JSON name to be used on the wire.
package: (NoneType, optional) No effect, exists for legacy compatibility.
"""
if not issubclass(enum_type, messages.Enum):
raise exceptions.TypecheckError(
'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
if python_name not in enum_type.names():
raise exceptions.InvalidDataError(
'Enum value %s not a value for type %s' % (python_name, enum_type))
field_mappings = _JSON_ENUM_MAPPINGS.setdefault(enum_type, {})
_CheckForExistingMappings('enum', enum_type, python_name, json_name)
field_mappings[python_name] = json_name
def AddCustomJsonFieldMapping(message_type, python_name, json_name,
package=None): # pylint: disable=unused-argument
"""Add a custom wire encoding for a given message field.
This is primarily used in generated code, to handle enum values
which happen to be Python keywords.
Args:
message_type: (messages.Message) A message type
python_name: (basestring) Python name for this value.
json_name: (basestring) JSON name to be used on the wire.
package: (NoneType, optional) No effect, exists for legacy compatibility.
"""
if not issubclass(message_type, messages.Message):
raise exceptions.TypecheckError(
'Cannot set JSON field mapping for '
'non-message "%s"' % message_type)
try:
_ = message_type.field_by_name(python_name)
except KeyError:
raise exceptions.InvalidDataError(
'Field %s not recognized for type %s' % (
python_name, message_type))
field_mappings = _JSON_FIELD_MAPPINGS.setdefault(message_type, {})
_CheckForExistingMappings('field', message_type, python_name, json_name)
field_mappings[python_name] = json_name
def GetCustomJsonEnumMapping(enum_type, python_name=None, json_name=None):
"""Return the appropriate remapping for the given enum, or None."""
return _FetchRemapping(enum_type, 'enum',
python_name=python_name, json_name=json_name,
mappings=_JSON_ENUM_MAPPINGS)
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None):
"""Return the appropriate remapping for the given field, or None."""
return _FetchRemapping(message_type, 'field',
python_name=python_name, json_name=json_name,
mappings=_JSON_FIELD_MAPPINGS)
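# Illustrative sketch (editor's addition): remapping an enum value whose wire
# name is a Python keyword, the main use case named above. '_BoolOp' is
# hypothetical.
class _BoolOp(messages.Enum):
    not_ = 1

AddCustomJsonEnumMapping(_BoolOp, python_name='not_', json_name='not')
# GetCustomJsonEnumMapping(_BoolOp, python_name='not_')  -> 'not'
# GetCustomJsonEnumMapping(_BoolOp, json_name='not')     -> 'not_'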
def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None,
mappings=None):
"""Common code for fetching a key or value from a remapping dict."""
if python_name and json_name:
raise exceptions.InvalidDataError(
'Cannot specify both python_name and json_name '
'for %s remapping' % mapping_type)
if not (python_name or json_name):
raise exceptions.InvalidDataError(
'Must specify either python_name or json_name for %s remapping' % (
mapping_type,))
field_remappings = mappings.get(type_name, {})
if field_remappings:
if python_name:
return field_remappings.get(python_name)
elif json_name:
if json_name in list(field_remappings.values()):
return [k for k in field_remappings
if field_remappings[k] == json_name][0]
return None
def _CheckForExistingMappings(mapping_type, message_type,
python_name, json_name):
"""Validate that no mappings exist for the given values."""
if mapping_type == 'field':
getter = GetCustomJsonFieldMapping
elif mapping_type == 'enum':
getter = GetCustomJsonEnumMapping
remapping = getter(message_type, python_name=python_name)
if remapping is not None and remapping != json_name:
raise exceptions.InvalidDataError(
'Cannot add mapping for %s "%s", already mapped to "%s"' % (
mapping_type, python_name, remapping))
remapping = getter(message_type, json_name=json_name)
if remapping is not None and remapping != python_name:
raise exceptions.InvalidDataError(
'Cannot add mapping for %s "%s", already mapped to "%s"' % (
mapping_type, json_name, remapping))
def _EncodeCustomFieldNames(message, encoded_value):
field_remappings = list(_JSON_FIELD_MAPPINGS.get(type(message), {})
.items())
if field_remappings:
decoded_value = json.loads(encoded_value)
for python_name, json_name in field_remappings:
            if python_name in decoded_value:
decoded_value[json_name] = decoded_value.pop(python_name)
encoded_value = json.dumps(decoded_value)
return encoded_value
def _DecodeCustomFieldNames(message_type, encoded_message):
field_remappings = _JSON_FIELD_MAPPINGS.get(message_type, {})
if field_remappings:
decoded_message = json.loads(encoded_message)
for python_name, json_name in list(field_remappings.items()):
if json_name in decoded_message:
decoded_message[python_name] = decoded_message.pop(json_name)
encoded_message = json.dumps(decoded_message)
return encoded_message
def _AsMessageList(msg):
"""Convert the provided list-as-JsonValue to a list."""
# This really needs to live in extra_types, but extra_types needs
# to import this file to be able to register codecs.
# TODO(craigcitro): Split out a codecs module and fix this ugly
# import.
from apitools.base.py import extra_types
def _IsRepeatedJsonValue(msg):
"""Return True if msg is a repeated value as a JsonValue."""
if isinstance(msg, extra_types.JsonArray):
return True
if isinstance(msg, extra_types.JsonValue) and msg.array_value:
return True
return False
if not _IsRepeatedJsonValue(msg):
raise ValueError('invalid argument to _AsMessageList')
if isinstance(msg, extra_types.JsonValue):
msg = msg.array_value
if isinstance(msg, extra_types.JsonArray):
msg = msg.entries
return msg
def _IsMap(message, field):
"""Returns whether the "field" is actually a map-type."""
value = message.get_assigned_value(field.name)
if not isinstance(value, messages.Message):
return False
try:
additional_properties = value.field_by_name('additionalProperties')
except KeyError:
return False
else:
return additional_properties.repeated
def _MapItems(message, field):
"""Yields the (key, value) pair of the map values."""
assert _IsMap(message, field)
map_message = message.get_assigned_value(field.name)
additional_properties = map_message.get_assigned_value(
'additionalProperties')
for kv_pair in additional_properties:
yield kv_pair.key, kv_pair.value
def UnrecognizedFieldIter(message, _edges=()): # pylint: disable=invalid-name
"""Yields the locations of unrecognized fields within "message".
If a sub-message is found to have unrecognized fields, that sub-message
will not be searched any further. We prune the search of the sub-message
because we assume it is malformed and further checks will not yield
productive errors.
Args:
message: The Message instance to search.
_edges: Internal arg for passing state.
Yields:
(edges_to_message, field_names):
edges_to_message: List[ProtoEdge], The edges (relative to "message")
describing the path to the sub-message where the unrecognized
fields were found.
field_names: List[Str], The names of the field(s) that were
unrecognized in the sub-message.
"""
if not isinstance(message, messages.Message):
# This is a primitive leaf, no errors found down this path.
return
field_names = message.all_unrecognized_fields()
if field_names:
# This message is malformed. Stop recursing and report it.
yield _edges, field_names
return
# Recurse through all fields in the current message.
for field in message.all_fields():
value = message.get_assigned_value(field.name)
if field.repeated:
for i, item in enumerate(value):
repeated_edge = ProtoEdge(EdgeType.REPEATED, field.name, i)
iter_ = UnrecognizedFieldIter(item, _edges + (repeated_edge,))
for (e, y) in iter_:
yield e, y
elif _IsMap(message, field):
for key, item in _MapItems(message, field):
map_edge = ProtoEdge(EdgeType.MAP, field.name, key)
iter_ = UnrecognizedFieldIter(item, _edges + (map_edge,))
for (e, y) in iter_:
yield e, y
else:
scalar_edge = ProtoEdge(EdgeType.SCALAR, field.name, None)
iter_ = UnrecognizedFieldIter(value, _edges + (scalar_edge,))
for (e, y) in iter_:
yield e, y
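# Illustrative usage sketch (editor's addition): decode JSON carrying a field
# the type does not declare, then locate it. '_Simple' is hypothetical.
def _ExampleUnrecognizedFieldIter():
    class _Simple(messages.Message):
        name = messages.StringField(1)

    msg = JsonToMessage(_Simple, '{"name": "a", "extra": 1}')
    # Yields one hit at the top level: edges == () and the field names
    # include 'extra'.
    return list(UnrecognizedFieldIter(msg))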
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/apitools/base/py/encoding_helper.py (pypi) ----
from apitools.base.py import exceptions
class StreamSlice(object):
"""Provides a slice-like object for streams."""
def __init__(self, stream, max_bytes):
self.__stream = stream
self.__remaining_bytes = max_bytes
self.__max_bytes = max_bytes
def __str__(self):
return 'Slice of stream %s with %s/%s bytes not yet read' % (
self.__stream, self.__remaining_bytes, self.__max_bytes)
def __len__(self):
return self.__max_bytes
def __nonzero__(self):
# For 32-bit python2.x, len() cannot exceed a 32-bit number; avoid
# accidental len() calls from httplib in the form of "if this_object:".
return bool(self.__max_bytes)
@property
def length(self):
# For 32-bit python2.x, len() cannot exceed a 32-bit number.
return self.__max_bytes
def read(self, size=None): # pylint: disable=missing-docstring
"""Read at most size bytes from this slice.
Compared to other streams, there is one case where we may
unexpectedly raise an exception on read: if the underlying stream
is exhausted (i.e. returns no bytes on read), and the size of this
slice indicates we should still be able to read more bytes, we
raise exceptions.StreamExhausted.
Args:
size: If provided, read no more than size bytes from the stream.
Returns:
The bytes read from this slice.
Raises:
exceptions.StreamExhausted
"""
if size is not None:
read_size = min(size, self.__remaining_bytes)
else:
read_size = self.__remaining_bytes
data = self.__stream.read(read_size)
if read_size > 0 and not data:
raise exceptions.StreamExhausted(
'Not enough bytes in stream; expected %d, exhausted '
'after %d' % (
self.__max_bytes,
self.__max_bytes - self.__remaining_bytes))
self.__remaining_bytes -= len(data)
return data
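# Illustrative usage sketch (editor's addition): slicing the first five bytes
# of an in-memory stream.
def _example_stream_slice():
    import io
    piece = StreamSlice(io.BytesIO(b'hello world'), 5)
    assert len(piece) == 5
    assert piece.read() == b'hello'  # consumes exactly max_bytes
    assert piece.read() == b''       # the slice is exhausted; no error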
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/apitools/base/py/stream_slice.py (pypi) ----
"""Common utility library."""
from __future__ import with_statement
import datetime
import functools
import inspect
import logging
import os
import re
import sys
import six
__all__ = [
'Error',
'decode_datetime',
'get_package_for_module',
'positional',
'TimeZoneOffset',
'total_seconds',
]
class Error(Exception):
"""Base class for protorpc exceptions."""
_TIME_ZONE_RE_STRING = r"""
# Examples:
# +01:00
# -05:30
  #   Z
((?P<z>Z) | (?P<sign>[-+])
(?P<hours>\d\d) :
(?P<minutes>\d\d))$
"""
_TIME_ZONE_RE = re.compile(_TIME_ZONE_RE_STRING, re.IGNORECASE | re.VERBOSE)
def positional(max_positional_args):
"""A decorator that declares only the first N arguments may be positional.
This decorator makes it easy to support Python 3 style keyword-only
parameters. For example, in Python 3 it is possible to write:
      def fn(pos1, *, kwonly1=None, kwonly2=None):
          ...
    All named parameters after * must be passed as keywords:
fn(10, 'kw1', 'kw2') # Raises exception.
fn(10, kwonly1='kw1') # Ok.
Example:
To define a function like above, do:
@positional(1)
def fn(pos1, kwonly1=None, kwonly2=None):
...
If no default value is provided to a keyword argument, it
becomes a required keyword argument:
@positional(0)
def fn(required_kw):
...
This must be called with the keyword parameter:
fn() # Raises exception.
fn(10) # Raises exception.
fn(required_kw=10) # Ok.
When defining instance or class methods always remember to account for
'self' and 'cls':
class MyClass(object):
@positional(2)
def my_method(self, pos1, kwonly1=None):
...
@classmethod
@positional(2)
def my_method(cls, pos1, kwonly1=None):
...
One can omit the argument to 'positional' altogether, and then no
arguments with default values may be passed positionally. This
would be equivalent to placing a '*' before the first argument
with a default value in Python 3. If there are no arguments with
default values, and no argument is given to 'positional', an error
is raised.
@positional
def fn(arg1, arg2, required_kw1=None, required_kw2=0):
...
fn(1, 3, 5) # Raises exception.
fn(1, 3) # Ok.
fn(1, 3, required_kw1=5) # Ok.
Args:
      max_positional_args: Maximum number of positional arguments. All
        parameters after this index must be keyword only.
Returns:
A decorator that prevents using arguments after max_positional_args from
being used as positional parameters.
Raises:
TypeError if a keyword-only argument is provided as a positional
parameter.
ValueError if no maximum number of arguments is provided and the function
has no arguments with default values.
"""
def positional_decorator(wrapped):
"""Creates a function wraper to enforce number of arguments."""
@functools.wraps(wrapped)
def positional_wrapper(*args, **kwargs):
if len(args) > max_positional_args:
plural_s = ''
if max_positional_args != 1:
plural_s = 's'
raise TypeError('%s() takes at most %d positional argument%s '
'(%d given)' % (wrapped.__name__,
max_positional_args,
plural_s, len(args)))
return wrapped(*args, **kwargs)
return positional_wrapper
if isinstance(max_positional_args, six.integer_types):
return positional_decorator
else:
args, _, _, defaults = inspect.getargspec(max_positional_args)
if defaults is None:
raise ValueError(
'Functions with no keyword arguments must specify '
'max_positional_args')
return positional(len(args) - len(defaults))(max_positional_args)
@positional(1)
def get_package_for_module(module):
"""Get package name for a module.
Helper calculates the package name of a module.
Args:
module: Module to get name for. If module is a string, try to find
module in sys.modules.
Returns:
If module contains 'package' attribute, uses that as package name.
Else, if module is not the '__main__' module, the module __name__.
Else, the base name of the module file name. Else None.
"""
if isinstance(module, six.string_types):
try:
module = sys.modules[module]
except KeyError:
return None
try:
return six.text_type(module.package)
except AttributeError:
if module.__name__ == '__main__':
try:
file_name = module.__file__
except AttributeError:
pass
else:
base_name = os.path.basename(file_name)
split_name = os.path.splitext(base_name)
if len(split_name) == 1:
return six.text_type(base_name)
return u'.'.join(split_name[:-1])
return six.text_type(module.__name__)
def total_seconds(offset):
"""Backport of offset.total_seconds() from python 2.7+."""
seconds = offset.days * 24 * 60 * 60 + offset.seconds
microseconds = seconds * 10**6 + offset.microseconds
return microseconds / (10**6 * 1.0)
class TimeZoneOffset(datetime.tzinfo):
"""Time zone information as encoded/decoded for DateTimeFields."""
def __init__(self, offset):
"""Initialize a time zone offset.
Args:
offset: Integer or timedelta time zone offset, in minutes from UTC.
This can be negative.
"""
super(TimeZoneOffset, self).__init__()
if isinstance(offset, datetime.timedelta):
offset = total_seconds(offset) / 60
self.__offset = offset
def utcoffset(self, _):
"""Get the a timedelta with the time zone's offset from UTC.
Returns:
The time zone offset from UTC, as a timedelta.
"""
return datetime.timedelta(minutes=self.__offset)
def dst(self, _):
"""Get the daylight savings time offset.
The formats that ProtoRPC uses to encode/decode time zone
information don't contain any information about daylight
savings time. So this always returns a timedelta of 0.
Returns:
A timedelta of 0.
"""
return datetime.timedelta(0)
def decode_datetime(encoded_datetime, truncate_time=False):
"""Decode a DateTimeField parameter from a string to a python datetime.
Args:
encoded_datetime: A string in RFC 3339 format.
      truncate_time: If true, truncate time strings whose precision is finer
        than microseconds.
Returns:
A datetime object with the date and time specified in encoded_datetime.
Raises:
ValueError: If the string is not in a recognized format.
"""
# Check if the string includes a time zone offset. Break out the
# part that doesn't include time zone info. Convert to uppercase
# because all our comparisons should be case-insensitive.
time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)
if time_zone_match:
time_string = encoded_datetime[:time_zone_match.start(1)].upper()
else:
time_string = encoded_datetime.upper()
if '.' in time_string:
format_string = '%Y-%m-%dT%H:%M:%S.%f'
else:
format_string = '%Y-%m-%dT%H:%M:%S'
try:
decoded_datetime = datetime.datetime.strptime(time_string,
format_string)
except ValueError:
if truncate_time and '.' in time_string:
datetime_string, decimal_secs = time_string.split('.')
if len(decimal_secs) > 6:
# datetime can handle only microsecs precision.
truncated_time_string = '{}.{}'.format(
datetime_string, decimal_secs[:6])
decoded_datetime = datetime.datetime.strptime(
truncated_time_string,
format_string)
logging.warning(
'Truncating the datetime string from %s to %s',
time_string, truncated_time_string)
else:
raise
else:
raise
if not time_zone_match:
return decoded_datetime
# Time zone info was included in the parameter. Add a tzinfo
# object to the datetime. Datetimes can't be changed after they're
# created, so we'll need to create a new one.
if time_zone_match.group('z'):
offset_minutes = 0
else:
sign = time_zone_match.group('sign')
hours, minutes = [int(value) for value in
time_zone_match.group('hours', 'minutes')]
offset_minutes = hours * 60 + minutes
if sign == '-':
offset_minutes *= -1
return datetime.datetime(decoded_datetime.year,
decoded_datetime.month,
decoded_datetime.day,
decoded_datetime.hour,
decoded_datetime.minute,
decoded_datetime.second,
decoded_datetime.microsecond,
TimeZoneOffset(offset_minutes))
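# Illustrative usage sketch (editor's addition): decoding RFC 3339 strings
# with and without zone information.
def _example_decode_datetime():
    naive = decode_datetime('2023-01-15T10:30:00')
    assert naive.tzinfo is None
    aware = decode_datetime('2023-01-15T10:30:00-05:00')
    # '-05:00' becomes a TimeZoneOffset of -300 minutes from UTC.
    assert total_seconds(aware.utcoffset()) == -300 * 60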
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/apitools/base/protorpclite/util.py (pypi) ----
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
package = 'dns'
class Change(_messages.Message):
r"""An atomic update to a collection of ResourceRecordSets.
Enums:
StatusValueValuesEnum: Status of the operation (output only).
Fields:
additions: Which ResourceRecordSets to add?
deletions: Which ResourceRecordSets to remove? Must match existing data
exactly.
id: Unique identifier for the resource; defined by the server (output
only).
kind: Identifies what kind of resource this is. Value: the fixed string
"dns#change".
startTime: The time that this operation was started by the server. This is
in RFC3339 text format.
status: Status of the operation (output only).
"""
class StatusValueValuesEnum(_messages.Enum):
r"""Status of the operation (output only).
Values:
done: <no description>
pending: <no description>
"""
done = 0
pending = 1
additions = _messages.MessageField('ResourceRecordSet', 1, repeated=True)
deletions = _messages.MessageField('ResourceRecordSet', 2, repeated=True)
id = _messages.StringField(3)
kind = _messages.StringField(4, default=u'dns#change')
startTime = _messages.StringField(5)
status = _messages.EnumField('StatusValueValuesEnum', 6)
class ChangesListResponse(_messages.Message):
r"""The response to a request to enumerate Changes to a ResourceRecordSets
collection.
Fields:
changes: The requested changes.
kind: Type of resource.
nextPageToken: The presence of this field indicates that there exist more
results following your last page of results in pagination order. To
fetch them, make another list request using this value as your
pagination token. In this way you can retrieve the complete contents of
even very large collections one page at a time. However, if the contents
of the collection change between the first and last paginated list
request, the set of all elements returned will be an inconsistent view
of the collection. There is no way to retrieve a "snapshot" of
collections larger than the maximum page size.
"""
changes = _messages.MessageField('Change', 1, repeated=True)
kind = _messages.StringField(2, default=u'dns#changesListResponse')
nextPageToken = _messages.StringField(3)
class DnsChangesCreateRequest(_messages.Message):
r"""A DnsChangesCreateRequest object.
Fields:
change: A Change resource to be passed as the request body.
managedZone: Identifies the managed zone addressed by this request. Can be
the managed zone name or id.
project: Identifies the project addressed by this request.
"""
change = _messages.MessageField('Change', 1)
managedZone = _messages.StringField(2, required=True)
project = _messages.StringField(3, required=True)
class DnsChangesGetRequest(_messages.Message):
r"""A DnsChangesGetRequest object.
Fields:
changeId: The identifier of the requested change, from a previous
ResourceRecordSetsChangeResponse.
managedZone: Identifies the managed zone addressed by this request. Can be
the managed zone name or id.
project: Identifies the project addressed by this request.
"""
changeId = _messages.StringField(1, required=True)
managedZone = _messages.StringField(2, required=True)
project = _messages.StringField(3, required=True)
class DnsChangesListRequest(_messages.Message):
r"""A DnsChangesListRequest object.
Enums:
SortByValueValuesEnum: Sorting criterion. The only supported value is
change sequence.
Fields:
managedZone: Identifies the managed zone addressed by this request. Can be
the managed zone name or id.
maxResults: Optional. Maximum number of results to be returned. If
unspecified, the server will decide how many results to return.
pageToken: Optional. A tag returned by a previous list request that was
truncated. Use this parameter to continue a previous list request.
project: Identifies the project addressed by this request.
sortBy: Sorting criterion. The only supported value is change sequence.
sortOrder: Sorting order direction: 'ascending' or 'descending'.
"""
class SortByValueValuesEnum(_messages.Enum):
r"""Sorting criterion. The only supported value is change sequence.
Values:
changeSequence: <no description>
"""
changeSequence = 0
managedZone = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
project = _messages.StringField(4, required=True)
sortBy = _messages.EnumField('SortByValueValuesEnum', 5, default=u'changeSequence')
sortOrder = _messages.StringField(6)
class DnsManagedZonesCreateRequest(_messages.Message):
r"""A DnsManagedZonesCreateRequest object.
Fields:
managedZone: A ManagedZone resource to be passed as the request body.
project: Identifies the project addressed by this request.
"""
managedZone = _messages.MessageField('ManagedZone', 1)
project = _messages.StringField(2, required=True)
class DnsManagedZonesDeleteRequest(_messages.Message):
r"""A DnsManagedZonesDeleteRequest object.
Fields:
managedZone: Identifies the managed zone addressed by this request. Can be
the managed zone name or id.
project: Identifies the project addressed by this request.
"""
managedZone = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DnsManagedZonesDeleteResponse(_messages.Message):
r"""An empty DnsManagedZonesDelete response."""
class DnsManagedZonesGetRequest(_messages.Message):
r"""A DnsManagedZonesGetRequest object.
Fields:
managedZone: Identifies the managed zone addressed by this request. Can be
the managed zone name or id.
project: Identifies the project addressed by this request.
"""
managedZone = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DnsManagedZonesListRequest(_messages.Message):
r"""A DnsManagedZonesListRequest object.
Fields:
dnsName: Restricts the list to return only zones with this domain name.
maxResults: Optional. Maximum number of results to be returned. If
unspecified, the server will decide how many results to return.
pageToken: Optional. A tag returned by a previous list request that was
truncated. Use this parameter to continue a previous list request.
project: Identifies the project addressed by this request.
"""
dnsName = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
project = _messages.StringField(4, required=True)
class DnsProjectsGetRequest(_messages.Message):
r"""A DnsProjectsGetRequest object.
Fields:
project: Identifies the project addressed by this request.
"""
project = _messages.StringField(1, required=True)
class DnsResourceRecordSetsListRequest(_messages.Message):
r"""A DnsResourceRecordSetsListRequest object.
Fields:
managedZone: Identifies the managed zone addressed by this request. Can be
the managed zone name or id.
maxResults: Optional. Maximum number of results to be returned. If
unspecified, the server will decide how many results to return.
name: Restricts the list to return only records with this fully qualified
domain name.
pageToken: Optional. A tag returned by a previous list request that was
truncated. Use this parameter to continue a previous list request.
project: Identifies the project addressed by this request.
type: Restricts the list to return only records of this type. If present,
the "name" parameter must also be present.
"""
managedZone = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.INT32)
name = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
type = _messages.StringField(6)
class ManagedZone(_messages.Message):
r"""A zone is a subtree of the DNS namespace under one administrative
responsibility. A ManagedZone is a resource that represents a DNS zone
hosted by the Cloud DNS service.
Fields:
creationTime: The time that this resource was created on the server. This
is in RFC3339 text format. Output only.
description: A mutable string of at most 1024 characters associated with
this resource for the user's convenience. Has no effect on the managed
zone's function.
dnsName: The DNS name of this managed zone, for instance "example.com.".
id: Unique identifier for the resource; defined by the server (output
only)
kind: Identifies what kind of resource this is. Value: the fixed string
"dns#managedZone".
name: User assigned name for this resource. Must be unique within the
project. The name must be 1-32 characters long, must begin with a
letter, end with a letter or digit, and only contain lowercase letters,
digits or dashes.
nameServerSet: Optionally specifies the NameServerSet for this
ManagedZone. A NameServerSet is a set of DNS name servers that all host
the same ManagedZones. Most users will leave this field unset.
nameServers: Delegate your managed_zone to these virtual name servers;
defined by the server (output only)
"""
creationTime = _messages.StringField(1)
description = _messages.StringField(2)
dnsName = _messages.StringField(3)
id = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
kind = _messages.StringField(5, default=u'dns#managedZone')
name = _messages.StringField(6)
nameServerSet = _messages.StringField(7)
nameServers = _messages.StringField(8, repeated=True)
class ManagedZonesListResponse(_messages.Message):
r"""A ManagedZonesListResponse object.
Fields:
kind: Type of resource.
managedZones: The managed zone resources.
nextPageToken: The presence of this field indicates that there exist more
results following your last page of results in pagination order. To
fetch them, make another list request using this value as your page
token. In this way you can retrieve the complete contents of even very
large collections one page at a time. However, if the contents of the
collection change between the first and last paginated list request, the
set of all elements returned will be an inconsistent view of the
collection. There is no way to retrieve a consistent snapshot of a
collection larger than the maximum page size.
"""
kind = _messages.StringField(1, default=u'dns#managedZonesListResponse')
managedZones = _messages.MessageField('ManagedZone', 2, repeated=True)
nextPageToken = _messages.StringField(3)
class Project(_messages.Message):
r"""A project resource. The project is a top level container for resources
including Cloud DNS ManagedZones. Projects can be created only in the APIs
console.
Fields:
id: User assigned unique identifier for the resource (output only).
kind: Identifies what kind of resource this is. Value: the fixed string
"dns#project".
number: Unique numeric identifier for the resource; defined by the server
(output only).
quota: Quotas assigned to this project (output only).
"""
id = _messages.StringField(1)
kind = _messages.StringField(2, default=u'dns#project')
number = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
quota = _messages.MessageField('Quota', 4)
class Quota(_messages.Message):
r"""Limits associated with a Project.
Fields:
kind: Identifies what kind of resource this is. Value: the fixed string
"dns#quota".
managedZones: Maximum allowed number of managed zones in the project.
resourceRecordsPerRrset: Maximum allowed number of ResourceRecords per
ResourceRecordSet.
rrsetAdditionsPerChange: Maximum allowed number of ResourceRecordSets to
add per ChangesCreateRequest.
rrsetDeletionsPerChange: Maximum allowed number of ResourceRecordSets to
delete per ChangesCreateRequest.
rrsetsPerManagedZone: Maximum allowed number of ResourceRecordSets per
zone in the project.
totalRrdataSizePerChange: Maximum allowed size for total rrdata in one
ChangesCreateRequest in bytes.
"""
kind = _messages.StringField(1, default=u'dns#quota')
managedZones = _messages.IntegerField(2, variant=_messages.Variant.INT32)
resourceRecordsPerRrset = _messages.IntegerField(3, variant=_messages.Variant.INT32)
rrsetAdditionsPerChange = _messages.IntegerField(4, variant=_messages.Variant.INT32)
rrsetDeletionsPerChange = _messages.IntegerField(5, variant=_messages.Variant.INT32)
rrsetsPerManagedZone = _messages.IntegerField(6, variant=_messages.Variant.INT32)
totalRrdataSizePerChange = _messages.IntegerField(7, variant=_messages.Variant.INT32)
class ResourceRecordSet(_messages.Message):
r"""A unit of data that will be returned by the DNS servers.
Fields:
kind: Identifies what kind of resource this is. Value: the fixed string
"dns#resourceRecordSet".
name: For example, www.example.com.
rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).
ttl: Number of seconds that this ResourceRecordSet can be cached by
resolvers.
type: The identifier of a supported record type, for example, A, AAAA, MX,
TXT, and so on.
"""
kind = _messages.StringField(1, default=u'dns#resourceRecordSet')
name = _messages.StringField(2)
rrdatas = _messages.StringField(3, repeated=True)
ttl = _messages.IntegerField(4, variant=_messages.Variant.INT32)
type = _messages.StringField(5)
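# Illustrative sketch (editor's addition, not part of the generated API): one
# A record wrapped in an atomic Change, the shape submitted with
# DnsChangesCreateRequest.
_EXAMPLE_CHANGE = Change(additions=[ResourceRecordSet(
    name='www.example.com.', type='A', ttl=300,
    rrdatas=['203.0.113.10'])])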
class ResourceRecordSetsListResponse(_messages.Message):
r"""A ResourceRecordSetsListResponse object.
Fields:
kind: Type of resource.
nextPageToken: The presence of this field indicates that there exist more
results following your last page of results in pagination order. To
fetch them, make another list request using this value as your
pagination token. In this way you can retrieve the complete contents of
even very large collections one page at a time. However, if the contents
of the collection change between the first and last paginated list
request, the set of all elements returned will be an inconsistent view
of the collection. There is no way to retrieve a consistent snapshot of
a collection larger than the maximum page size.
rrsets: The resource record set resources.
"""
kind = _messages.StringField(1, default=u'dns#resourceRecordSetsListResponse')
nextPageToken = _messages.StringField(2)
rrsets = _messages.MessageField('ResourceRecordSet', 3, repeated=True)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/samples/dns_sample/dns_v1/dns_v1_messages.py (pypi) ----
from __future__ import absolute_import
from apitools.base.protorpclite import message_types as _message_types
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'storage'
class Bucket(_messages.Message):
r"""A bucket.
Messages:
CorsValueListEntry: A CorsValueListEntry object.
LifecycleValue: The bucket's lifecycle configuration. See lifecycle
management for more information.
LoggingValue: The bucket's logging configuration, which defines the
destination bucket and optional name prefix for the current bucket's
logs.
OwnerValue: The owner of the bucket. This is always the project team's
owner group.
VersioningValue: The bucket's versioning configuration.
WebsiteValue: The bucket's website configuration, controlling how the
service behaves when accessing bucket contents as a web site. See the
Static Website Examples for more information.
Fields:
acl: Access controls on the bucket.
cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration.
defaultObjectAcl: Default access controls to apply to new objects when no
ACL is provided.
etag: HTTP 1.1 Entity tag for the bucket.
id: The ID of the bucket.
kind: The kind of item this is. For buckets, this is always
storage#bucket.
lifecycle: The bucket's lifecycle configuration. See lifecycle management
for more information.
location: The location of the bucket. Object data for objects in the
bucket resides in physical storage within this region. Defaults to US.
See the developer's guide for the authoritative list.
logging: The bucket's logging configuration, which defines the destination
bucket and optional name prefix for the current bucket's logs.
metageneration: The metadata generation of this bucket.
name: The name of the bucket.
owner: The owner of the bucket. This is always the project team's owner
group.
projectNumber: The project number of the project the bucket belongs to.
selfLink: The URI of this bucket.
storageClass: The bucket's storage class. This defines how objects in the
bucket are stored and determines the SLA and the cost of storage. Values
include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to
STANDARD. For more information, see storage classes.
timeCreated: The creation time of the bucket in RFC 3339 format.
updated: The modification time of the bucket in RFC 3339 format.
versioning: The bucket's versioning configuration.
website: The bucket's website configuration, controlling how the service
behaves when accessing bucket contents as a web site. See the Static
Website Examples for more information.
"""
class CorsValueListEntry(_messages.Message):
r"""A CorsValueListEntry object.
Fields:
maxAgeSeconds: The value, in seconds, to return in the Access-Control-
Max-Age header used in preflight responses.
method: The list of HTTP methods on which to include CORS response
headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
of methods, and means "any method".
origin: The list of Origins eligible to receive CORS response headers.
Note: "*" is permitted in the list of origins, and means "any Origin".
responseHeader: The list of HTTP headers other than the simple response
headers to give permission for the user-agent to share across domains.
"""
maxAgeSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
method = _messages.StringField(2, repeated=True)
origin = _messages.StringField(3, repeated=True)
responseHeader = _messages.StringField(4, repeated=True)
class LifecycleValue(_messages.Message):
r"""The bucket's lifecycle configuration. See lifecycle management for
more information.
Messages:
RuleValueListEntry: A RuleValueListEntry object.
Fields:
rule: A lifecycle management rule, which is made of an action to take
and the condition(s) under which the action will be taken.
"""
class RuleValueListEntry(_messages.Message):
r"""A RuleValueListEntry object.
Messages:
ActionValue: The action to take.
ConditionValue: The condition(s) under which the action will be taken.
Fields:
action: The action to take.
condition: The condition(s) under which the action will be taken.
"""
class ActionValue(_messages.Message):
r"""The action to take.
Fields:
type: Type of the action. Currently, only Delete is supported.
"""
type = _messages.StringField(1)
class ConditionValue(_messages.Message):
r"""The condition(s) under which the action will be taken.
Fields:
age: Age of an object (in days). This condition is satisfied when an
object reaches the specified age.
createdBefore: A date in RFC 3339 format with only the date part
(for instance, "2013-01-15"). This condition is satisfied when an
object is created before midnight of the specified date in UTC.
isLive: Relevant only for versioned objects. If the value is true,
this condition matches live objects; if the value is false, it
matches archived objects.
numNewerVersions: Relevant only for versioned objects. If the value
is N, this condition is satisfied when there are at least N
versions (including the live version) newer than this version of
the object.
"""
age = _messages.IntegerField(1, variant=_messages.Variant.INT32)
createdBefore = extra_types.DateField(2)
isLive = _messages.BooleanField(3)
numNewerVersions = _messages.IntegerField(4, variant=_messages.Variant.INT32)
action = _messages.MessageField('ActionValue', 1)
condition = _messages.MessageField('ConditionValue', 2)
rule = _messages.MessageField('RuleValueListEntry', 1, repeated=True)
class LoggingValue(_messages.Message):
r"""The bucket's logging configuration, which defines the destination
bucket and optional name prefix for the current bucket's logs.
Fields:
logBucket: The destination bucket where the current bucket's logs should
be placed.
logObjectPrefix: A prefix for log object names.
"""
logBucket = _messages.StringField(1)
logObjectPrefix = _messages.StringField(2)
class OwnerValue(_messages.Message):
r"""The owner of the bucket. This is always the project team's owner
group.
Fields:
entity: The entity, in the form project-owner-projectId.
entityId: The ID for the entity.
"""
entity = _messages.StringField(1)
entityId = _messages.StringField(2)
class VersioningValue(_messages.Message):
r"""The bucket's versioning configuration.
Fields:
enabled: While set to true, versioning is fully enabled for this bucket.
"""
enabled = _messages.BooleanField(1)
class WebsiteValue(_messages.Message):
r"""The bucket's website configuration, controlling how the service
behaves when accessing bucket contents as a web site. See the Static
Website Examples for more information.
Fields:
mainPageSuffix: If the requested object path is missing, the service
will ensure the path has a trailing '/', append this suffix, and
attempt to retrieve the resulting object. This allows the creation of
index.html objects to represent directory pages.
notFoundPage: If the requested object path is missing, and any
mainPageSuffix object is missing, if applicable, the service will
return the named object from this bucket as the content for a 404 Not
Found result.
"""
mainPageSuffix = _messages.StringField(1)
notFoundPage = _messages.StringField(2)
acl = _messages.MessageField('BucketAccessControl', 1, repeated=True)
cors = _messages.MessageField('CorsValueListEntry', 2, repeated=True)
defaultObjectAcl = _messages.MessageField('ObjectAccessControl', 3, repeated=True)
etag = _messages.StringField(4)
id = _messages.StringField(5)
kind = _messages.StringField(6, default=u'storage#bucket')
lifecycle = _messages.MessageField('LifecycleValue', 7)
location = _messages.StringField(8)
logging = _messages.MessageField('LoggingValue', 9)
metageneration = _messages.IntegerField(10)
name = _messages.StringField(11)
owner = _messages.MessageField('OwnerValue', 12)
projectNumber = _messages.IntegerField(13, variant=_messages.Variant.UINT64)
selfLink = _messages.StringField(14)
storageClass = _messages.StringField(15)
timeCreated = _message_types.DateTimeField(16)
updated = _message_types.DateTimeField(17)
versioning = _messages.MessageField('VersioningValue', 18)
website = _messages.MessageField('WebsiteValue', 19)
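# Illustrative sketch (editor's addition, not part of the generated API): a
# minimal Bucket with one permissive CORS rule, as it would appear in a
# buckets.insert request body.
_EXAMPLE_BUCKET = Bucket(
    name='example-bucket',
    location='US',
    cors=[Bucket.CorsValueListEntry(
        origin=['*'], method=['GET'], maxAgeSeconds=3600)])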
class BucketAccessControl(_messages.Message):
r"""An access-control entry.
Messages:
ProjectTeamValue: The project team associated with the entity, if any.
Fields:
bucket: The name of the bucket.
domain: The domain associated with the entity, if any.
email: The email address associated with the entity, if any.
entity: The entity holding the permission, in one of the following forms:
- user-userId - user-email - group-groupId - group-email - domain-
domain - project-team-projectId - allUsers - allAuthenticatedUsers
      Examples: - The user [email protected] would be [email protected]. -
        The group [email protected] would be group-
        [email protected]. - To refer to all members of the Google Apps
for Business domain example.com, the entity would be domain-example.com.
entityId: The ID for the entity, if any.
etag: HTTP 1.1 Entity tag for the access-control entry.
id: The ID of the access-control entry.
kind: The kind of item this is. For bucket access control entries, this is
always storage#bucketAccessControl.
projectTeam: The project team associated with the entity, if any.
role: The access permission for the entity. Can be READER, WRITER, or
OWNER.
selfLink: The link to this access-control entry.
"""
class ProjectTeamValue(_messages.Message):
r"""The project team associated with the entity, if any.
Fields:
projectNumber: The project number.
team: The team. Can be owners, editors, or viewers.
"""
projectNumber = _messages.StringField(1)
team = _messages.StringField(2)
bucket = _messages.StringField(1)
domain = _messages.StringField(2)
email = _messages.StringField(3)
entity = _messages.StringField(4)
entityId = _messages.StringField(5)
etag = _messages.StringField(6)
id = _messages.StringField(7)
kind = _messages.StringField(8, default=u'storage#bucketAccessControl')
projectTeam = _messages.MessageField('ProjectTeamValue', 9)
role = _messages.StringField(10)
selfLink = _messages.StringField(11)
class BucketAccessControls(_messages.Message):
r"""An access-control list.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of bucket access control
entries, this is always storage#bucketAccessControls.
"""
items = _messages.MessageField('BucketAccessControl', 1, repeated=True)
kind = _messages.StringField(2, default=u'storage#bucketAccessControls')
class Buckets(_messages.Message):
r"""A list of buckets.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of buckets, this is always
storage#buckets.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
"""
items = _messages.MessageField('Bucket', 1, repeated=True)
kind = _messages.StringField(2, default=u'storage#buckets')
nextPageToken = _messages.StringField(3)
class Channel(_messages.Message):
r"""An notification channel used to watch for resource changes.
Messages:
ParamsValue: Additional parameters controlling delivery channel behavior.
Optional.
Fields:
address: The address where notifications are delivered for this channel.
expiration: Date and time of notification channel expiration, expressed as
a Unix timestamp, in milliseconds. Optional.
id: A UUID or similar unique string that identifies this channel.
kind: Identifies this as a notification channel used to watch for changes
to a resource. Value: the fixed string "api#channel".
params: Additional parameters controlling delivery channel behavior.
Optional.
payload: A Boolean value to indicate whether payload is wanted. Optional.
resourceId: An opaque ID that identifies the resource being watched on
this channel. Stable across different API versions.
resourceUri: A version-specific identifier for the watched resource.
token: An arbitrary string delivered to the target address with each
notification delivered over this channel. Optional.
type: The type of delivery mechanism used for this channel.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParamsValue(_messages.Message):
r"""Additional parameters controlling delivery channel behavior. Optional.
Messages:
AdditionalProperty: An additional property for a ParamsValue object.
Fields:
additionalProperties: Declares a new parameter by name.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ParamsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
address = _messages.StringField(1)
expiration = _messages.IntegerField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default=u'api#channel')
params = _messages.MessageField('ParamsValue', 5)
payload = _messages.BooleanField(6)
resourceId = _messages.StringField(7)
resourceUri = _messages.StringField(8)
token = _messages.StringField(9)
type = _messages.StringField(10)
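# Illustrative sketch: constructing a Channel for a watch request. The id,
# address, and token are made up, and the 'web_hook' delivery type is an
# assumption based on the push-notification documentation, not on this file.
def _example_build_channel():
    """Build a notification Channel with one extra delivery parameter."""
    return Channel(
        id='01234567-89ab-cdef-0123-456789abcdef',
        type='web_hook',
        address='https://example.com/notifications',
        token='opaque-client-state',
        params=Channel.ParamsValue(additionalProperties=[
            Channel.ParamsValue.AdditionalProperty(key='ttl', value='3600'),
        ]))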
class ComposeRequest(_messages.Message):
r"""A Compose request.
Messages:
SourceObjectsValueListEntry: A SourceObjectsValueListEntry object.
Fields:
destination: Properties of the resulting object.
kind: The kind of item this is.
sourceObjects: The list of source objects that will be concatenated into a
single object.
"""
class SourceObjectsValueListEntry(_messages.Message):
r"""A SourceObjectsValueListEntry object.
Messages:
ObjectPreconditionsValue: Conditions that must be met for this operation
to execute.
Fields:
generation: The generation of this object to use as the source.
name: The source object's name. The source object's bucket is implicitly
the destination bucket.
objectPreconditions: Conditions that must be met for this operation to
execute.
"""
class ObjectPreconditionsValue(_messages.Message):
r"""Conditions that must be met for this operation to execute.
Fields:
ifGenerationMatch: Only perform the composition if the generation of
the source object that would be used matches this value. If this
value and a generation are both specified, they must be the same
value or the call will fail.
"""
ifGenerationMatch = _messages.IntegerField(1)
generation = _messages.IntegerField(1)
name = _messages.StringField(2)
objectPreconditions = _messages.MessageField('ObjectPreconditionsValue', 3)
destination = _messages.MessageField('Object', 1)
kind = _messages.StringField(2, default=u'storage#composeRequest')
sourceObjects = _messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True)
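# Illustrative sketch: assembling a ComposeRequest from the nested entry
# types above. Bucket and object names are hypothetical; the precondition
# pins one source part to a specific generation.
def _example_build_compose_request():
    """Concatenate two source objects into a single destination object."""
    entry = ComposeRequest.SourceObjectsValueListEntry
    return ComposeRequest(
        destination=Object(
            bucket='example-bucket',
            name='combined.log',
            contentType='text/plain'),
        sourceObjects=[
            entry(name='part-0.log'),
            entry(name='part-1.log',
                  objectPreconditions=entry.ObjectPreconditionsValue(
                      ifGenerationMatch=1234567890)),
        ])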
class Notification(_messages.Message):
r"""A subscription to receive Google PubSub notifications.
Messages:
CustomAttributesValue: An optional list of additional attributes to attach
to each Cloud PubSub message published for this notification
subscription.
Fields:
bucket: The name of the bucket this subscription is particular to.
custom_attributes: An optional list of additional attributes to attach to
each Cloud PubSub message published for this notification subscription.
etag: HTTP 1.1 Entity tag for this subscription notification.
event_types: If present, only send notifications about listed event types.
If empty, notifications are sent for all event types.
id: The ID of the notification.
kind: The kind of item this is. For notifications, this is always
storage#notification.
object_metadata_format: If payload_content is OBJECT_METADATA, controls
the format of that metadata. Otherwise, must not be set.
object_name_prefix: If present, only apply this notification configuration
to object names that begin with this prefix.
payload_content: The desired content of the Payload. Defaults to
OBJECT_METADATA.
selfLink: The canonical URL of this notification.
topic: The Cloud PubSub topic to which this subscription publishes.
Formatted as: '//pubsub.googleapis.com/projects/{project-
identifier}/topics/{my-topic}'
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class CustomAttributesValue(_messages.Message):
r"""An optional list of additional attributes to attach to each Cloud
PubSub message published for this notification subscription.
Messages:
AdditionalProperty: An additional property for a CustomAttributesValue
object.
Fields:
additionalProperties: Additional properties of type
CustomAttributesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a CustomAttributesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
bucket = _messages.StringField(1)
custom_attributes = _messages.MessageField('CustomAttributesValue', 2)
etag = _messages.StringField(3)
event_types = _messages.StringField(4, repeated=True)
id = _messages.StringField(5)
kind = _messages.StringField(6, default=u'storage#notification')
object_metadata_format = _messages.StringField(7, default=u'JSON_API_V1')
object_name_prefix = _messages.StringField(8)
payload_content = _messages.StringField(9, default=u'OBJECT_METADATA')
selfLink = _messages.StringField(10)
topic = _messages.StringField(11)
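# Illustrative sketch: a Notification subscription with custom attributes.
# The project, topic, and attribute values are made up; the event type
# strings are the documented Cloud Storage notification event names.
def _example_build_notification():
    """Build a Notification that publishes create/delete events."""
    attrs = Notification.CustomAttributesValue(additionalProperties=[
        Notification.CustomAttributesValue.AdditionalProperty(
            key='env', value='prod'),
    ])
    return Notification(
        topic='//pubsub.googleapis.com/projects/example-project'
              '/topics/example-topic',
        event_types=['OBJECT_FINALIZE', 'OBJECT_DELETE'],
        custom_attributes=attrs)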
class Notifications(_messages.Message):
r"""A list of notification subscriptions.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of notifications, this is always
storage#notifications.
"""
items = _messages.MessageField('Notification', 1, repeated=True)
kind = _messages.StringField(2, default=u'storage#notifications')
class Object(_messages.Message):
r"""An object.
Messages:
CustomerEncryptionValue: Metadata of customer-supplied encryption key, if
the object is encrypted by such a key.
MetadataValue: User-provided metadata, in key/value pairs.
OwnerValue: The owner of the object. This will always be the uploader of
the object.
Fields:
acl: Access controls on the object.
bucket: The name of the bucket containing this object.
cacheControl: Cache-Control directive for the object data.
componentCount: Number of underlying components that make up this object.
Components are accumulated by compose operations.
contentDisposition: Content-Disposition of the object data.
contentEncoding: Content-Encoding of the object data.
contentLanguage: Content-Language of the object data.
contentType: Content-Type of the object data. If contentType is not
specified, object downloads will be served as application/octet-stream.
crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded
using base64 in big-endian byte order. For more information about using
the CRC32c checksum, see Hashes and ETags: Best Practices.
customerEncryption: Metadata of customer-supplied encryption key, if the
object is encrypted by such a key.
etag: HTTP 1.1 Entity tag for the object.
generation: The content generation of this object. Used for object
versioning.
id: The ID of the object.
kind: The kind of item this is. For objects, this is always
storage#object.
md5Hash: MD5 hash of the data; encoded using base64. For more information
about using the MD5 hash, see Hashes and ETags: Best Practices.
mediaLink: Media download link.
metadata: User-provided metadata, in key/value pairs.
metageneration: The version of the metadata for this object at this
generation. Used for preconditions and for detecting changes in
metadata. A metageneration number is only meaningful in the context of a
particular generation of a particular object.
name: The name of this object. Required if not specified by URL parameter.
owner: The owner of the object. This will always be the uploader of the
object.
selfLink: The link to this object.
size: Content-Length of the data in bytes.
storageClass: Storage class of the object.
timeCreated: The creation time of the object in RFC 3339 format.
timeDeleted: The deletion time of the object in RFC 3339 format. Will be
returned if and only if this version of the object has been deleted.
updated: The modification time of the object metadata in RFC 3339 format.
"""
class CustomerEncryptionValue(_messages.Message):
r"""Metadata of customer-supplied encryption key, if the object is
encrypted by such a key.
Fields:
encryptionAlgorithm: The encryption algorithm.
keySha256: SHA256 hash value of the encryption key.
"""
encryptionAlgorithm = _messages.StringField(1)
keySha256 = _messages.StringField(2)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""User-provided metadata, in key/value pairs.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: An individual metadata entry.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class OwnerValue(_messages.Message):
r"""The owner of the object. This will always be the uploader of the
object.
Fields:
entity: The entity, in the form user-userId.
entityId: The ID for the entity.
"""
entity = _messages.StringField(1)
entityId = _messages.StringField(2)
acl = _messages.MessageField('ObjectAccessControl', 1, repeated=True)
bucket = _messages.StringField(2)
cacheControl = _messages.StringField(3)
componentCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
contentDisposition = _messages.StringField(5)
contentEncoding = _messages.StringField(6)
contentLanguage = _messages.StringField(7)
contentType = _messages.StringField(8)
crc32c = _messages.StringField(9)
customerEncryption = _messages.MessageField('CustomerEncryptionValue', 10)
etag = _messages.StringField(11)
generation = _messages.IntegerField(12)
id = _messages.StringField(13)
kind = _messages.StringField(14, default=u'storage#object')
md5Hash = _messages.StringField(15)
mediaLink = _messages.StringField(16)
metadata = _messages.MessageField('MetadataValue', 17)
metageneration = _messages.IntegerField(18)
name = _messages.StringField(19)
owner = _messages.MessageField('OwnerValue', 20)
selfLink = _messages.StringField(21)
size = _messages.IntegerField(22, variant=_messages.Variant.UINT64)
storageClass = _messages.StringField(23)
timeCreated = _message_types.DateTimeField(24)
timeDeleted = _message_types.DateTimeField(25)
updated = _message_types.DateTimeField(26)
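# Illustrative sketch: an Object message carrying user metadata through the
# map-style MetadataValue type above. All names and values are made up.
def _example_build_object():
    """Build an Object with a contentType and one metadata entry."""
    metadata = Object.MetadataValue(additionalProperties=[
        Object.MetadataValue.AdditionalProperty(
            key='source', value='backup-job'),
    ])
    return Object(
        bucket='example-bucket',
        name='reports/2016-01.csv',
        contentType='text/csv',
        cacheControl='private, max-age=0',
        metadata=metadata)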
class ObjectAccessControl(_messages.Message):
r"""An access-control entry.
Messages:
ProjectTeamValue: The project team associated with the entity, if any.
Fields:
bucket: The name of the bucket.
domain: The domain associated with the entity, if any.
email: The email address associated with the entity, if any.
entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, or allAuthenticatedUsers. Examples:
the user liz@example.com would be user-liz@example.com; the group
example@googlegroups.com would be group-example@googlegroups.com; to
refer to all members of the Google Apps for Business domain example.com,
the entity would be domain-example.com.
entityId: The ID for the entity, if any.
etag: HTTP 1.1 Entity tag for the access-control entry.
generation: The content generation of the object.
id: The ID of the access-control entry.
kind: The kind of item this is. For object access control entries, this is
always storage#objectAccessControl.
object: The name of the object.
projectTeam: The project team associated with the entity, if any.
role: The access permission for the entity. Can be READER or OWNER.
selfLink: The link to this access-control entry.
"""
class ProjectTeamValue(_messages.Message):
r"""The project team associated with the entity, if any.
Fields:
projectNumber: The project number.
team: The team. Can be owners, editors, or viewers.
"""
projectNumber = _messages.StringField(1)
team = _messages.StringField(2)
bucket = _messages.StringField(1)
domain = _messages.StringField(2)
email = _messages.StringField(3)
entity = _messages.StringField(4)
entityId = _messages.StringField(5)
etag = _messages.StringField(6)
generation = _messages.IntegerField(7)
id = _messages.StringField(8)
kind = _messages.StringField(9, default=u'storage#objectAccessControl')
object = _messages.StringField(10)
projectTeam = _messages.MessageField('ProjectTeamValue', 11)
role = _messages.StringField(12)
selfLink = _messages.StringField(13)
class ObjectAccessControls(_messages.Message):
r"""An access-control list.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of object access control
entries, this is always storage#objectAccessControls.
"""
items = _messages.MessageField('extra_types.JsonValue', 1, repeated=True)
kind = _messages.StringField(2, default=u'storage#objectAccessControls')
class Objects(_messages.Message):
r"""A list of objects.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of objects, this is always
storage#objects.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
prefixes: The list of prefixes of objects matching-but-not-listed up to
and including the requested delimiter.
"""
items = _messages.MessageField('Object', 1, repeated=True)
kind = _messages.StringField(2, default=u'storage#objects')
nextPageToken = _messages.StringField(3)
prefixes = _messages.StringField(4, repeated=True)
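# Illustrative sketch of paging through Objects results with nextPageToken.
# `client` stands in for an apitools-generated StorageV1 service client,
# which is defined elsewhere, not in this message module.
def _example_list_all_objects(client, bucket_name):
    """Yield every Object in a bucket, following continuation tokens."""
    page_token = None
    while True:
        response = client.objects.List(StorageObjectsListRequest(
            bucket=bucket_name, pageToken=page_token))
        for obj in response.items:
            yield obj
        page_token = response.nextPageToken
        if not page_token:
            break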
class Policy(_messages.Message):
r"""A bucket/object IAM policy.
Messages:
BindingsValueListEntry: A BindingsValueListEntry object.
Fields:
bindings: An association between a role, which comes with a set of
permissions, and members who may assume that role.
etag: HTTP 1.1 Entity tag for the policy.
kind: The kind of item this is. For policies, this is always
storage#policy. This field is ignored on input.
resourceId: The ID of the resource to which this policy belongs. Will be
of the form buckets/bucket for buckets, and
buckets/bucket/objects/object for objects. A specific generation may be
specified by appending #generationNumber to the end of the object name,
e.g. buckets/my-bucket/objects/data.txt#17. The current generation can
be denoted with #0. This field is ignored on input.
"""
class BindingsValueListEntry(_messages.Message):
r"""A BindingsValueListEntry object.
Fields:
members: A collection of identifiers for members who may assume the
provided role. Recognized identifiers are as follows: - allUsers - A
special identifier that represents anyone on the internet; with or
without a Google account. - allAuthenticatedUsers - A special
identifier that represents anyone who is authenticated with a Google
account or a service account. - user:emailid - An email address that
represents a specific account. For example, user:alice@gmail.com or
user:joe@example.com. - serviceAccount:emailid - An email address
that represents a service account. For example,
serviceAccount:my-other-app@appspot.gserviceaccount.com. -
group:emailid - An email address that represents a Google group. For
example, group:admins@example.com. - domain:domain - A Google Apps domain
name that represents all the users of that domain. For example,
domain:google.com or domain:example.com. - projectOwner:projectid -
Owners of the given project. For example, projectOwner:my-example-
project - projectEditor:projectid - Editors of the given project.
For example, projectEditor:my-example-project -
projectViewer:projectid - Viewers of the given project. For example,
projectViewer:my-example-project
role: The role to which members belong. Two types of roles are
supported: new IAM roles, which grant permissions that do not map
directly to those provided by ACLs, and legacy IAM roles, which do map
directly to ACL permissions. All roles are of the format
roles/storage.specificRole. The new IAM roles are: -
roles/storage.admin - Full control of Google Cloud Storage resources.
- roles/storage.objectViewer - Read-Only access to Google Cloud
Storage objects. - roles/storage.objectCreator - Access to create
objects in Google Cloud Storage. - roles/storage.objectAdmin - Full
control of Google Cloud Storage objects. The legacy IAM roles are:
- roles/storage.legacyObjectReader - Read-only access to objects
without listing. Equivalent to an ACL entry on an object with the
READER role. - roles/storage.legacyObjectOwner - Read/write access
to existing objects without listing. Equivalent to an ACL entry on an
object with the OWNER role. - roles/storage.legacyBucketReader -
Read access to buckets with object listing. Equivalent to an ACL entry
on a bucket with the READER role. - roles/storage.legacyBucketWriter
- Read access to buckets with object listing/creation/deletion.
Equivalent to an ACL entry on a bucket with the WRITER role. -
roles/storage.legacyBucketOwner - Read and write access to existing
buckets with object listing/creation/deletion. Equivalent to an ACL
entry on a bucket with the OWNER role.
"""
members = _messages.StringField(1, repeated=True)
role = _messages.StringField(2)
bindings = _messages.MessageField('BindingsValueListEntry', 1, repeated=True)
etag = _messages.BytesField(2)
kind = _messages.StringField(3, default=u'storage#policy')
resourceId = _messages.StringField(4)
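# Illustrative sketch: a bucket Policy granting one new-style IAM role to
# everyone and another to a single user. The bucket and email are made up.
def _example_build_policy():
    """Build a Policy with two role bindings."""
    return Policy(
        resourceId='buckets/example-bucket',
        bindings=[
            Policy.BindingsValueListEntry(
                role='roles/storage.objectViewer',
                members=['allUsers']),
            Policy.BindingsValueListEntry(
                role='roles/storage.objectAdmin',
                members=['user:alice@example.com']),
        ])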
class RewriteResponse(_messages.Message):
r"""A rewrite response.
Fields:
done: true if the copy is finished; otherwise, false if the copy is in
progress. This property is always present in the response.
kind: The kind of item this is.
objectSize: The total size of the object being copied in bytes. This
property is always present in the response.
resource: A resource containing the metadata for the copied-to object.
This property is present in the response only when copying completes.
rewriteToken: A token to use in subsequent requests to continue copying
data. This token is present in the response only when there is more data
to copy.
totalBytesRewritten: The total bytes written so far, which can be used to
provide a waiting user with a progress indicator. This property is
always present in the response.
"""
done = _messages.BooleanField(1)
kind = _messages.StringField(2, default=u'storage#rewriteResponse')
objectSize = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
resource = _messages.MessageField('Object', 4)
rewriteToken = _messages.StringField(5)
totalBytesRewritten = _messages.IntegerField(6, variant=_messages.Variant.UINT64)
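# Illustrative sketch of the rewrite loop implied by RewriteResponse: keep
# re-issuing the request with the returned token until done is true.
# `client` is an assumed apitools StorageV1 client and `request` an already
# populated StorageObjectsRewriteRequest (defined elsewhere in this module).
def _example_drive_rewrite(client, request):
    """Run objects.Rewrite to completion and return the copied Object."""
    while True:
        response = client.objects.Rewrite(request)
        if response.done:
            return response.resource
        # Carry the continuation token into the next call.
        request.rewriteToken = response.rewriteToken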
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in API
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
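# Illustrative sketch: trimming a response with the `fields` selector. The
# global_params keyword is how apitools-generated clients accept
# StandardQueryParameters; `client` is such a client, assumed here.
def _example_partial_response(client, bucket_name):
    """List objects but fetch only their names and the paging token."""
    params = StandardQueryParameters(
        fields='items/name,nextPageToken',
        prettyPrint=False)
    return client.objects.List(
        StorageObjectsListRequest(bucket=bucket_name),
        global_params=params)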
class StorageBucketAccessControlsDeleteRequest(_messages.Message):
r"""A StorageBucketAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
class StorageBucketAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageBucketAccessControlsDelete response."""
class StorageBucketAccessControlsGetRequest(_messages.Message):
r"""A StorageBucketAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
class StorageBucketAccessControlsListRequest(_messages.Message):
r"""A StorageBucketAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
"""
bucket = _messages.StringField(1, required=True)
class StorageBucketsDeleteRequest(_messages.Message):
r"""A StorageBucketsDeleteRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: If set, only deletes the bucket if its
metageneration matches this value.
ifMetagenerationNotMatch: If set, only deletes the bucket if its
metageneration does not match this value.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
class StorageBucketsDeleteResponse(_messages.Message):
r"""An empty StorageBucketsDelete response."""
class StorageBucketsGetIamPolicyRequest(_messages.Message):
r"""A StorageBucketsGetIamPolicyRequest object.
Fields:
bucket: Name of a bucket.
"""
bucket = _messages.StringField(1, required=True)
class StorageBucketsGetRequest(_messages.Message):
r"""A StorageBucketsGetRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
projection: Set of properties to return. Defaults to noAcl.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
projection = _messages.EnumField('ProjectionValueValuesEnum', 4)
class StorageBucketsInsertRequest(_messages.Message):
r"""A StorageBucketsInsertRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the bucket resource specifies acl or defaultObjectAcl properties,
when it defaults to full.
Fields:
bucket: A Bucket resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
project: A valid API project identifier.
projection: Set of properties to return. Defaults to noAcl, unless the
bucket resource specifies acl or defaultObjectAcl properties, when it
defaults to full.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the bucket
resource specifies acl or defaultObjectAcl properties, when it defaults to
full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.MessageField('Bucket', 1)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 2)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3)
project = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
class StorageBucketsListRequest(_messages.Message):
r"""A StorageBucketsListRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
maxResults: Maximum number of buckets to return.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to buckets whose names begin with this prefix.
project: A valid API project identifier.
projection: Set of properties to return. Defaults to noAcl.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
prefix = _messages.StringField(3)
project = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
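# Illustrative sketch: a bucket listing request that filters by prefix and
# asks for full projections via the nested enum. The project id is made up.
def _example_buckets_list_request():
    """Build a StorageBucketsListRequest for prod-prefixed buckets."""
    return StorageBucketsListRequest(
        project='example-project',
        prefix='prod-',
        maxResults=100,
        projection=StorageBucketsListRequest.ProjectionValueValuesEnum.full)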
class StorageBucketsPatchRequest(_messages.Message):
r"""A StorageBucketsPatchRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of a bucket.
bucketResource: A Bucket resource to be passed as the request body.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
projection: Set of properties to return. Defaults to full.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
bucketResource = _messages.MessageField('Bucket', 2)
ifMetagenerationMatch = _messages.IntegerField(3)
ifMetagenerationNotMatch = _messages.IntegerField(4)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
class StorageBucketsSetIamPolicyRequest(_messages.Message):
r"""A StorageBucketsSetIamPolicyRequest object.
Fields:
bucket: Name of a bucket.
policy: A Policy resource to be passed as the request body.
"""
bucket = _messages.StringField(1, required=True)
policy = _messages.MessageField('Policy', 2)
class StorageBucketsTestIamPermissionsRequest(_messages.Message):
r"""A StorageBucketsTestIamPermissionsRequest object.
Fields:
bucket: Name of a bucket.
permissions: Permissions to test.
"""
bucket = _messages.StringField(1, required=True)
permissions = _messages.StringField(2, required=True)
class StorageBucketsUpdateRequest(_messages.Message):
r"""A StorageBucketsUpdateRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of a bucket.
bucketResource: A Bucket resource to be passed as the request body.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
projection: Set of properties to return. Defaults to full.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
bucketResource = _messages.MessageField('Bucket', 2)
ifMetagenerationMatch = _messages.IntegerField(3)
ifMetagenerationNotMatch = _messages.IntegerField(4)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
class StorageChannelsStopResponse(_messages.Message):
r"""An empty StorageChannelsStop response."""
class StorageDefaultObjectAccessControlsDeleteRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
class StorageDefaultObjectAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageDefaultObjectAccessControlsDelete response."""
class StorageDefaultObjectAccessControlsGetRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
class StorageDefaultObjectAccessControlsListRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: If present, only return default ACL listing if the
bucket's current metageneration matches this value.
ifMetagenerationNotMatch: If present, only return default ACL listing if
the bucket's current metageneration does not match the given value.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
class StorageNotificationsDeleteRequest(_messages.Message):
r"""A StorageNotificationsDeleteRequest object.
Fields:
notification: ID of the notification to delete.
"""
notification = _messages.StringField(1, required=True)
class StorageNotificationsDeleteResponse(_messages.Message):
r"""An empty StorageNotificationsDelete response."""
class StorageNotificationsGetRequest(_messages.Message):
r"""A StorageNotificationsGetRequest object.
Fields:
notification: Notification ID.
"""
notification = _messages.StringField(1, required=True)
class StorageNotificationsListRequest(_messages.Message):
r"""A StorageNotificationsListRequest object.
Fields:
bucket: Name of a GCS bucket.
"""
bucket = _messages.StringField(1, required=True)
class StorageObjectAccessControlsDeleteRequest(_messages.Message):
r"""A StorageObjectAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
class StorageObjectAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageObjectAccessControlsDelete response."""
class StorageObjectAccessControlsGetRequest(_messages.Message):
r"""A StorageObjectAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
class StorageObjectAccessControlsInsertRequest(_messages.Message):
r"""A StorageObjectAccessControlsInsertRequest object.
Fields:
bucket: Name of a bucket.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 4)
class StorageObjectAccessControlsListRequest(_messages.Message):
r"""A StorageObjectAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
class StorageObjectAccessControlsPatchRequest(_messages.Message):
r"""A StorageObjectAccessControlsPatchRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
class StorageObjectAccessControlsUpdateRequest(_messages.Message):
r"""A StorageObjectAccessControlsUpdateRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
class StorageObjectsComposeRequest(_messages.Message):
r"""A StorageObjectsComposeRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
Fields:
composeRequest: A ComposeRequest resource to be passed as the request
body.
destinationBucket: Name of the bucket in which to store the new object.
destinationObject: Name of the new object. For information about how to
URL encode object names to be path safe, see Encoding URI Path Parts.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
composeRequest = _messages.MessageField('ComposeRequest', 1)
destinationBucket = _messages.StringField(2, required=True)
destinationObject = _messages.StringField(3, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
ifGenerationMatch = _messages.IntegerField(5)
ifMetagenerationMatch = _messages.IntegerField(6)
class StorageObjectsCopyRequest(_messages.Message):
r"""A StorageObjectsCopyRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
destinationBucket: Name of the bucket in which to store the new object.
Overrides the provided object metadata's bucket value, if any. For
information about how to URL encode object names to be path safe, see
Encoding URI Path Parts.
destinationObject: Name of the new object. Required when the object
metadata is not otherwise provided. Overrides the object metadata's name
value, if any.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the
destination object's current generation matches the given value.
ifGenerationNotMatch: Makes the operation conditional on whether the
destination object's current generation does not match the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
destination object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
destination object's current metageneration does not match the given
value.
ifSourceGenerationMatch: Makes the operation conditional on whether the
source object's generation matches the given value.
ifSourceGenerationNotMatch: Makes the operation conditional on whether the
source object's generation does not match the given value.
ifSourceMetagenerationMatch: Makes the operation conditional on whether
the source object's current metageneration matches the given value.
ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
the source object's current metageneration does not match the given
value.
object: An Object resource to be passed as the request body.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
sourceBucket: Name of the bucket in which to find the source object.
sourceGeneration: If present, selects a specific revision of the source
object (as opposed to the latest version, the default).
sourceObject: Name of the source object. For information about how to URL
encode object names to be path safe, see Encoding URI Path Parts.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner and acl properties.
"""
full = 0
noAcl = 1
destinationBucket = _messages.StringField(1, required=True)
destinationObject = _messages.StringField(2, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3)
ifGenerationMatch = _messages.IntegerField(4)
ifGenerationNotMatch = _messages.IntegerField(5)
ifMetagenerationMatch = _messages.IntegerField(6)
ifMetagenerationNotMatch = _messages.IntegerField(7)
ifSourceGenerationMatch = _messages.IntegerField(8)
ifSourceGenerationNotMatch = _messages.IntegerField(9)
ifSourceMetagenerationMatch = _messages.IntegerField(10)
ifSourceMetagenerationNotMatch = _messages.IntegerField(11)
object = _messages.MessageField('Object', 12)
projection = _messages.EnumField('ProjectionValueValuesEnum', 13)
sourceBucket = _messages.StringField(14, required=True)
sourceGeneration = _messages.IntegerField(15)
sourceObject = _messages.StringField(16, required=True)
class StorageObjectsDeleteRequest(_messages.Message):
r"""A StorageObjectsDeleteRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, permanently deletes a specific revision of this
object (as opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
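# Illustrative sketch: using ifGenerationMatch to delete an object only if
# it has not changed since `generation` was observed. `client` is an assumed
# apitools StorageV1 client.
def _example_conditional_delete(client, bucket_name, object_name, generation):
    """Delete an object, failing if a newer generation exists."""
    return client.objects.Delete(StorageObjectsDeleteRequest(
        bucket=bucket_name,
        object=object_name,
        ifGenerationMatch=generation))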
class StorageObjectsDeleteResponse(_messages.Message):
r"""An empty StorageObjectsDelete response."""
class StorageObjectsGetIamPolicyRequest(_messages.Message):
r"""A StorageObjectsGetIamPolicyRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
class StorageObjectsGetRequest(_messages.Message):
r"""A StorageObjectsGetRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
generation matches the given value.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's generation does not match the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
projection: Set of properties to return. Defaults to noAcl.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner and acl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 8)
class StorageObjectsInsertRequest(_messages.Message):
r"""A StorageObjectsInsertRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
bucket: Name of the bucket in which to store the new object. Overrides the
provided object metadata's bucket value, if any.
contentEncoding: If set, sets the contentEncoding property of the final
object to this value. Setting this parameter is equivalent to setting
the contentEncoding metadata property. This can be useful when uploading
an object with uploadType=media to indicate the encoding of the content
being uploaded.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
name: Name of the object. Required when the object metadata is not
otherwise provided. Overrides the object metadata's name value, if any.
For information about how to URL encode object names to be path safe,
see Encoding URI Path Parts.
object: An Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner and acl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
contentEncoding = _messages.StringField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
name = _messages.StringField(7)
object = _messages.MessageField('Object', 8)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
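# A minimal construction sketch for the insert request above. These message
# classes are plain protorpc messages, so fields are set via keyword
# arguments and enum values come from the nested enum class; actually sending
# the request needs the companion generated client module, which is not part
# of this file.
_EXAMPLE_INSERT_REQUEST = StorageObjectsInsertRequest(
    bucket='my-bucket',                      # required
    name='logs/2020-01-01.txt',              # required when metadata has no name
    contentEncoding='gzip',                  # same effect as the metadata property
    predefinedAcl=(StorageObjectsInsertRequest
                   .PredefinedAclValueValuesEnum.projectPrivate))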
class StorageObjectsListRequest(_messages.Message):
r"""A StorageObjectsListRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which to look for objects.
delimiter: Returns results in a directory-like mode. items will contain
only objects whose names, aside from the prefix, do not contain
delimiter. Objects whose names, aside from the prefix, contain delimiter
will have their name, truncated after the delimiter, returned in
prefixes. Duplicate prefixes are omitted.
maxResults: Maximum number of items plus prefixes to return. As duplicate
prefixes are omitted, fewer total results may be returned than
requested. The default value of this parameter is 1,000 items.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to objects whose names begin with this prefix.
projection: Set of properties to return. Defaults to noAcl.
versions: If true, lists all versions of an object as distinct results.
The default is false. For more information, see Object Versioning.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
delimiter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(4)
prefix = _messages.StringField(5)
projection = _messages.EnumField('ProjectionValueValuesEnum', 6)
versions = _messages.BooleanField(7)
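# Sketch of directory-style listing built on the prefix/delimiter/pageToken
# semantics documented above. The `client.objects.List` call and the response
# fields (items, prefixes, nextPageToken) are assumptions about the companion
# generated client and the Objects list response, which live outside this
# messages module.
def _example_list_directory(client, bucket):
    page_token = None
    while True:
        resp = client.objects.List(StorageObjectsListRequest(
            bucket=bucket, prefix='logs/', delimiter='/',
            maxResults=100, pageToken=page_token))
        for obj in resp.items:        # objects directly under logs/
            print(obj.name)
        for prefix in resp.prefixes:  # "subdirectories"; duplicates omitted
            print(prefix)
        page_token = resp.nextPageToken
        if not page_token:
            break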
class StorageObjectsPatchRequest(_messages.Message):
r"""A StorageObjectsPatchRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectResource: A Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to full.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
objectResource = _messages.MessageField('Object', 8)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
class StorageObjectsRewriteRequest(_messages.Message):
r"""A StorageObjectsRewriteRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
destinationBucket: Name of the bucket in which to store the new object.
Overrides the provided object metadata's bucket value, if any.
destinationObject: Name of the new object. Required when the object
metadata is not otherwise provided. Overrides the object metadata's name
value, if any. For information about how to URL encode object names to
be path safe, see Encoding URI Path Parts.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the
destination object's current generation matches the given value.
ifGenerationNotMatch: Makes the operation conditional on whether the
destination object's current generation does not match the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
destination object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
destination object's current metageneration does not match the given
value.
ifSourceGenerationMatch: Makes the operation conditional on whether the
source object's generation matches the given value.
ifSourceGenerationNotMatch: Makes the operation conditional on whether the
source object's generation does not match the given value.
ifSourceMetagenerationMatch: Makes the operation conditional on whether
the source object's current metageneration matches the given value.
ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
the source object's current metageneration does not match the given
value.
maxBytesRewrittenPerCall: The maximum number of bytes that will be
rewritten per rewrite request. Most callers shouldn't need to specify
this parameter - it is primarily in place to support testing. If
specified the value must be an integral multiple of 1 MiB (1048576).
Also, this only applies to requests where the source and destination
span locations and/or storage classes. Finally, this value must not
change across rewrite calls else you'll get an error that the
rewriteToken is invalid.
object: A Object resource to be passed as the request body.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
rewriteToken: Include this field (from the previous rewrite response) on
each rewrite request after the first one, until the rewrite response
'done' flag is true. Calls that provide a rewriteToken can omit all
other request fields, but if included those fields must match the values
provided in the first rewrite request.
sourceBucket: Name of the bucket in which to find the source object.
sourceGeneration: If present, selects a specific revision of the source
object (as opposed to the latest version, the default).
sourceObject: Name of the source object. For information about how to URL
encode object names to be path safe, see Encoding URI Path Parts.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
destinationBucket = _messages.StringField(1, required=True)
destinationObject = _messages.StringField(2, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3)
ifGenerationMatch = _messages.IntegerField(4)
ifGenerationNotMatch = _messages.IntegerField(5)
ifMetagenerationMatch = _messages.IntegerField(6)
ifMetagenerationNotMatch = _messages.IntegerField(7)
ifSourceGenerationMatch = _messages.IntegerField(8)
ifSourceGenerationNotMatch = _messages.IntegerField(9)
ifSourceMetagenerationMatch = _messages.IntegerField(10)
ifSourceMetagenerationNotMatch = _messages.IntegerField(11)
maxBytesRewrittenPerCall = _messages.IntegerField(12)
object = _messages.MessageField('Object', 13)
projection = _messages.EnumField('ProjectionValueValuesEnum', 14)
rewriteToken = _messages.StringField(15)
sourceBucket = _messages.StringField(16, required=True)
sourceGeneration = _messages.IntegerField(17)
sourceObject = _messages.StringField(18, required=True)
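# The rewriteToken protocol described above, as a loop sketch: re-issue the
# request, copying rewriteToken from each response, until the response
# reports done. `client.objects.Rewrite` and the response fields
# (done, rewriteToken) are assumptions about the generated client and its
# RewriteResponse message, neither of which is defined in this file.
def _example_rewrite(client, src_bucket, src_obj, dst_bucket, dst_obj):
    req = StorageObjectsRewriteRequest(
        sourceBucket=src_bucket, sourceObject=src_obj,
        destinationBucket=dst_bucket, destinationObject=dst_obj)
    while True:
        resp = client.objects.Rewrite(req)
        if resp.done:
            return resp
        req.rewriteToken = resp.rewriteToken  # must accompany every later call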
class StorageObjectsSetIamPolicyRequest(_messages.Message):
r"""A StorageObjectsSetIamPolicyRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
policy: A Policy resource to be passed as the request body.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
policy = _messages.MessageField('Policy', 4)
class StorageObjectsTestIamPermissionsRequest(_messages.Message):
r"""A StorageObjectsTestIamPermissionsRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
permissions: Permissions to test.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
permissions = _messages.StringField(4, required=True)
class StorageObjectsUpdateRequest(_messages.Message):
r"""A StorageObjectsUpdateRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectResource: A Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to full.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
objectResource = _messages.MessageField('Object', 8)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
class StorageObjectsWatchAllRequest(_messages.Message):
r"""A StorageObjectsWatchAllRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which to look for objects.
channel: A Channel resource to be passed as the request body.
delimiter: Returns results in a directory-like mode. items will contain
only objects whose names, aside from the prefix, do not contain
delimiter. Objects whose names, aside from the prefix, contain delimiter
will have their name, truncated after the delimiter, returned in
prefixes. Duplicate prefixes are omitted.
maxResults: Maximum number of items plus prefixes to return. As duplicate
prefixes are omitted, fewer total results may be returned than
requested. The default value of this parameter is 1,000 items.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to objects whose names begin with this prefix.
projection: Set of properties to return. Defaults to noAcl.
versions: If true, lists all versions of an object as distinct results.
The default is false. For more information, see Object Versioning.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
channel = _messages.MessageField('Channel', 2)
delimiter = _messages.StringField(3)
maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(5)
prefix = _messages.StringField(6)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
versions = _messages.BooleanField(8)
class TestIamPermissionsResponse(_messages.Message):
r"""A storage.(buckets|objects).testIamPermissions response.
Fields:
kind: The kind of item this is.
permissions: The permissions held by the caller. Permissions are always of
the format storage.resource.capability, where resource is one of buckets
or objects. The supported permissions are as follows: -
storage.buckets.delete - Delete bucket. - storage.buckets.get - Read
bucket metadata. - storage.buckets.getIamPolicy - Read bucket IAM
policy. - storage.buckets.create - Create bucket. -
storage.buckets.list - List buckets. - storage.buckets.setIamPolicy -
Update bucket IAM policy. - storage.buckets.update - Update bucket
metadata. - storage.objects.delete - Delete object. -
storage.objects.get - Read object data and metadata. -
storage.objects.getIamPolicy - Read object IAM policy. -
storage.objects.create - Create object. - storage.objects.list - List
objects. - storage.objects.setIamPolicy - Update object IAM policy.
- storage.objects.update - Update object metadata.
"""
kind = _messages.StringField(1, default=u'storage#testIamPermissionsResponse')
permissions = _messages.StringField(2, repeated=True)
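# Illustration of the permission-string format documented above: every entry
# is storage.<resource>.<capability>. The values are copied verbatim from the
# docstring; only the message construction itself is shown.
_EXAMPLE_PERMISSIONS_RESPONSE = TestIamPermissionsResponse(
    permissions=[
        'storage.objects.get',   # read object data and metadata
        'storage.objects.list',  # list objects
    ])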
# ==== end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/samples/storage_sample/storage_v1/storage_v1_messages.py (source: pypi) ====
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import extra_types
package = 'fusiontables'
class Bucket(_messages.Message):
r"""Specifies the minimum and maximum values, the color, opacity, icon and
weight of a bucket within a StyleSetting.
Fields:
color: Color of line or the interior of a polygon in #RRGGBB format.
icon: Icon name used for a point.
max: Maximum value in the selected column for a row to be styled according
to the bucket color, opacity, icon, or weight.
min: Minimum value in the selected column for a row to be styled according
to the bucket color, opacity, icon, or weight.
opacity: Opacity of the color: 0.0 (transparent) to 1.0 (opaque).
weight: Width of a line (in pixels).
"""
color = _messages.StringField(1)
icon = _messages.StringField(2)
max = _messages.FloatField(3)
min = _messages.FloatField(4)
opacity = _messages.FloatField(5)
weight = _messages.IntegerField(6, variant=_messages.Variant.INT32)
class Column(_messages.Message):
r"""Specifies the id, name and type of a column in a table.
Messages:
BaseColumnValue: Optional identifier of the base column. If present, this
column is derived from the specified base column.
Fields:
baseColumn: Optional identifier of the base column. If present, this
column is derived from the specified base column.
columnId: Identifier for the column.
description: Optional column description.
graph_predicate: Optional column predicate. Used to map table to graph
data model (subject,predicate,object) See http://www.w3.org/TR/2014/REC-
rdf11-concepts-20140225/#data-model
kind: Type name: a template for an individual column.
name: Required name of the column.
type: Required type of the column.
"""
class BaseColumnValue(_messages.Message):
r"""Optional identifier of the base column. If present, this column is
derived from the specified base column.
Fields:
columnId: The id of the column in the base table from which this column
is derived.
tableIndex: Offset to the entry in the list of base tables in the table
definition.
"""
columnId = _messages.IntegerField(1, variant=_messages.Variant.INT32)
tableIndex = _messages.IntegerField(2, variant=_messages.Variant.INT32)
baseColumn = _messages.MessageField('BaseColumnValue', 1)
columnId = _messages.IntegerField(2, variant=_messages.Variant.INT32)
description = _messages.StringField(3)
graph_predicate = _messages.StringField(4)
kind = _messages.StringField(5, default=u'fusiontables#column')
name = _messages.StringField(6)
type = _messages.StringField(7)
class ColumnList(_messages.Message):
r"""Represents a list of columns in a table.
Fields:
items: List of all requested columns.
kind: Type name: a list of all columns.
nextPageToken: Token used to access the next page of this result. No token
is displayed if there are no more pages left.
totalItems: Total number of columns for the table.
"""
items = _messages.MessageField('Column', 1, repeated=True)
kind = _messages.StringField(2, default=u'fusiontables#columnList')
nextPageToken = _messages.StringField(3)
totalItems = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class FusiontablesColumnDeleteRequest(_messages.Message):
r"""A FusiontablesColumnDeleteRequest object.
Fields:
columnId: Name or identifier for the column being deleted.
tableId: Table from which the column is being deleted.
"""
columnId = _messages.StringField(1, required=True)
tableId = _messages.StringField(2, required=True)
class FusiontablesColumnDeleteResponse(_messages.Message):
r"""An empty FusiontablesColumnDelete response."""
class FusiontablesColumnGetRequest(_messages.Message):
r"""A FusiontablesColumnGetRequest object.
Fields:
columnId: Name or identifier for the column that is being requested.
tableId: Table to which the column belongs.
"""
columnId = _messages.StringField(1, required=True)
tableId = _messages.StringField(2, required=True)
class FusiontablesColumnInsertRequest(_messages.Message):
r"""A FusiontablesColumnInsertRequest object.
Fields:
column: A Column resource to be passed as the request body.
tableId: Table for which a new column is being added.
"""
column = _messages.MessageField('Column', 1)
tableId = _messages.StringField(2, required=True)
class FusiontablesColumnListRequest(_messages.Message):
r"""A FusiontablesColumnListRequest object.
Fields:
maxResults: Maximum number of columns to return. Optional. Default is 5.
pageToken: Continuation token specifying which result page to return.
Optional.
tableId: Table whose columns are being listed.
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
tableId = _messages.StringField(3, required=True)
class FusiontablesColumnPatchRequest(_messages.Message):
r"""A FusiontablesColumnPatchRequest object.
Fields:
column: A Column resource to be passed as the request body.
columnId: Name or identifier for the column that is being updated.
tableId: Table for which the column is being updated.
"""
column = _messages.MessageField('Column', 1)
columnId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class FusiontablesColumnUpdateRequest(_messages.Message):
r"""A FusiontablesColumnUpdateRequest object.
Fields:
column: A Column resource to be passed as the request body.
columnId: Name or identifier for the column that is being updated.
tableId: Table for which the column is being updated.
"""
column = _messages.MessageField('Column', 1)
columnId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class FusiontablesQuerySqlGetRequest(_messages.Message):
r"""A FusiontablesQuerySqlGetRequest object.
Fields:
    hdrs: Should column names be included (in the first row)? Default is
true.
sql: An SQL SELECT/SHOW/DESCRIBE statement.
typed: Should typed values be returned in the (JSON) response -- numbers
for numeric values and parsed geometries for KML values? Default is
true.
"""
hdrs = _messages.BooleanField(1)
sql = _messages.StringField(2, required=True)
typed = _messages.BooleanField(3)
class FusiontablesQuerySqlRequest(_messages.Message):
r"""A FusiontablesQuerySqlRequest object.
Fields:
    hdrs: Should column names be included (in the first row)? Default is
true.
sql: An SQL SELECT/SHOW/DESCRIBE/INSERT/UPDATE/DELETE/CREATE statement.
typed: Should typed values be returned in the (JSON) response -- numbers
for numeric values and parsed geometries for KML values? Default is
true.
"""
hdrs = _messages.BooleanField(1)
sql = _messages.StringField(2, required=True)
typed = _messages.BooleanField(3)
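# Sketch of a typed SQL query request per the docstring above. Referencing the
# table by its encrypted tableId inside the SQL string follows the Fusion
# Tables convention; the `client.query.Sql` call is an assumption about the
# companion generated client, not something defined here.
def _example_query(client, table_id):
    req = FusiontablesQuerySqlRequest(
        sql='SELECT * FROM %s LIMIT 10' % table_id,
        hdrs=True,   # include column names in the first row
        typed=True)  # numbers and parsed geometries instead of raw strings
    return client.query.Sql(req)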
class FusiontablesStyleDeleteRequest(_messages.Message):
r"""A FusiontablesStyleDeleteRequest object.
Fields:
styleId: Identifier (within a table) for the style being deleted
tableId: Table from which the style is being deleted
"""
styleId = _messages.IntegerField(1, required=True, variant=_messages.Variant.INT32)
tableId = _messages.StringField(2, required=True)
class FusiontablesStyleDeleteResponse(_messages.Message):
r"""An empty FusiontablesStyleDelete response."""
class FusiontablesStyleGetRequest(_messages.Message):
r"""A FusiontablesStyleGetRequest object.
Fields:
styleId: Identifier (integer) for a specific style in a table
tableId: Table to which the requested style belongs
"""
styleId = _messages.IntegerField(1, required=True, variant=_messages.Variant.INT32)
tableId = _messages.StringField(2, required=True)
class FusiontablesStyleListRequest(_messages.Message):
r"""A FusiontablesStyleListRequest object.
Fields:
maxResults: Maximum number of styles to return. Optional. Default is 5.
pageToken: Continuation token specifying which result page to return.
Optional.
tableId: Table whose styles are being listed
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
tableId = _messages.StringField(3, required=True)
class FusiontablesTableCopyRequest(_messages.Message):
r"""A FusiontablesTableCopyRequest object.
Fields:
copyPresentation: Whether to also copy tabs, styles, and templates.
Default is false.
tableId: ID of the table that is being copied.
"""
copyPresentation = _messages.BooleanField(1)
tableId = _messages.StringField(2, required=True)
class FusiontablesTableDeleteRequest(_messages.Message):
r"""A FusiontablesTableDeleteRequest object.
Fields:
tableId: ID of the table that is being deleted.
"""
tableId = _messages.StringField(1, required=True)
class FusiontablesTableDeleteResponse(_messages.Message):
r"""An empty FusiontablesTableDelete response."""
class FusiontablesTableGetRequest(_messages.Message):
r"""A FusiontablesTableGetRequest object.
Fields:
tableId: Identifier(ID) for the table being requested.
"""
tableId = _messages.StringField(1, required=True)
class FusiontablesTableImportRowsRequest(_messages.Message):
r"""A FusiontablesTableImportRowsRequest object.
Fields:
delimiter: The delimiter used to separate cell values. This can only
consist of a single character. Default is ','.
encoding: The encoding of the content. Default is UTF-8. Use 'auto-detect'
if you are unsure of the encoding.
endLine: The index of the last line from which to start importing,
exclusive. Thus, the number of imported lines is endLine - startLine. If
this parameter is not provided, the file will be imported until the last
line of the file. If endLine is negative, then the imported content will
      exclude the last endLine lines. That is, if endLine is negative, no line
will be imported whose index is greater than N + endLine where N is the
number of lines in the file, and the number of imported lines will be N
+ endLine - startLine.
isStrict: Whether the CSV must have the same number of values for each
row. If false, rows with fewer values will be padded with empty values.
Default is true.
startLine: The index of the first line from which to start importing,
inclusive. Default is 0.
tableId: The table into which new rows are being imported.
"""
delimiter = _messages.StringField(1)
encoding = _messages.StringField(2)
endLine = _messages.IntegerField(3, variant=_messages.Variant.INT32)
isStrict = _messages.BooleanField(4)
startLine = _messages.IntegerField(5, variant=_messages.Variant.INT32)
tableId = _messages.StringField(6, required=True)
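# Worked illustration of the startLine/endLine arithmetic documented above,
# for a CSV with n_lines data lines. This mirrors the docstring only; no API
# call is involved.
def _imported_line_count(n_lines, start_line=0, end_line=None):
    if end_line is None:
        return n_lines - start_line              # import through end of file
    if end_line < 0:
        return n_lines + end_line - start_line   # drop the last |endLine| lines
    return end_line - start_line                 # half-open [startLine, endLine)

# e.g. a 100-line file, skipping a header row and two trailing summary rows:
# _imported_line_count(100, start_line=1, end_line=-2) == 97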
class FusiontablesTableImportTableRequest(_messages.Message):
r"""A FusiontablesTableImportTableRequest object.
Fields:
delimiter: The delimiter used to separate cell values. This can only
consist of a single character. Default is ','.
encoding: The encoding of the content. Default is UTF-8. Use 'auto-detect'
if you are unsure of the encoding.
name: The name to be assigned to the new table.
"""
delimiter = _messages.StringField(1)
encoding = _messages.StringField(2)
name = _messages.StringField(3, required=True)
class FusiontablesTableListRequest(_messages.Message):
r"""A FusiontablesTableListRequest object.
Fields:
    maxResults: Maximum number of tables to return. Optional. Default is 5.
pageToken: Continuation token specifying which result page to return.
Optional.
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
class FusiontablesTablePatchRequest(_messages.Message):
r"""A FusiontablesTablePatchRequest object.
Fields:
replaceViewDefinition: Should the view definition also be updated? The
specified view definition replaces the existing one. Only a view can be
updated with a new definition.
table: A Table resource to be passed as the request body.
tableId: ID of the table that is being updated.
"""
replaceViewDefinition = _messages.BooleanField(1)
table = _messages.MessageField('Table', 2)
tableId = _messages.StringField(3, required=True)
class FusiontablesTableUpdateRequest(_messages.Message):
r"""A FusiontablesTableUpdateRequest object.
Fields:
replaceViewDefinition: Should the view definition also be updated? The
specified view definition replaces the existing one. Only a view can be
updated with a new definition.
table: A Table resource to be passed as the request body.
tableId: ID of the table that is being updated.
"""
replaceViewDefinition = _messages.BooleanField(1)
table = _messages.MessageField('Table', 2)
tableId = _messages.StringField(3, required=True)
class FusiontablesTaskDeleteRequest(_messages.Message):
r"""A FusiontablesTaskDeleteRequest object.
Fields:
tableId: Table from which the task is being deleted.
taskId: A string attribute.
"""
tableId = _messages.StringField(1, required=True)
taskId = _messages.StringField(2, required=True)
class FusiontablesTaskDeleteResponse(_messages.Message):
r"""An empty FusiontablesTaskDelete response."""
class FusiontablesTaskGetRequest(_messages.Message):
r"""A FusiontablesTaskGetRequest object.
Fields:
tableId: Table to which the task belongs.
taskId: A string attribute.
"""
tableId = _messages.StringField(1, required=True)
taskId = _messages.StringField(2, required=True)
class FusiontablesTaskListRequest(_messages.Message):
r"""A FusiontablesTaskListRequest object.
Fields:
    maxResults: Maximum number of tasks to return. Optional. Default is 5.
pageToken: A string attribute.
    startIndex: An integer attribute.
tableId: Table whose tasks are being listed.
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
startIndex = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
tableId = _messages.StringField(4, required=True)
class FusiontablesTemplateDeleteRequest(_messages.Message):
r"""A FusiontablesTemplateDeleteRequest object.
Fields:
tableId: Table from which the template is being deleted
templateId: Identifier for the template which is being deleted
"""
tableId = _messages.StringField(1, required=True)
templateId = _messages.IntegerField(2, required=True, variant=_messages.Variant.INT32)
class FusiontablesTemplateDeleteResponse(_messages.Message):
r"""An empty FusiontablesTemplateDelete response."""
class FusiontablesTemplateGetRequest(_messages.Message):
r"""A FusiontablesTemplateGetRequest object.
Fields:
tableId: Table to which the template belongs
templateId: Identifier for the template that is being requested
"""
tableId = _messages.StringField(1, required=True)
templateId = _messages.IntegerField(2, required=True, variant=_messages.Variant.INT32)
class FusiontablesTemplateListRequest(_messages.Message):
r"""A FusiontablesTemplateListRequest object.
Fields:
maxResults: Maximum number of templates to return. Optional. Default is 5.
pageToken: Continuation token specifying which results page to return.
Optional.
tableId: Identifier for the table whose templates are being requested
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
tableId = _messages.StringField(3, required=True)
class Geometry(_messages.Message):
r"""Represents a Geometry object.
Fields:
geometries: The list of geometries in this geometry collection.
geometry: A extra_types.JsonValue attribute.
type: Type: A collection of geometries.
"""
geometries = _messages.MessageField('extra_types.JsonValue', 1, repeated=True)
geometry = _messages.MessageField('extra_types.JsonValue', 2)
type = _messages.StringField(3, default=u'GeometryCollection')
class Import(_messages.Message):
r"""Represents an import request.
Fields:
kind: Type name: a template for an import request.
numRowsReceived: The number of rows received from the import request.
"""
kind = _messages.StringField(1, default=u'fusiontables#import')
numRowsReceived = _messages.IntegerField(2)
class Line(_messages.Message):
r"""Represents a line geometry.
Messages:
CoordinatesValueListEntry: Single entry in a CoordinatesValue.
Fields:
coordinates: The coordinates that define the line.
type: Type: A line geometry.
"""
class CoordinatesValueListEntry(_messages.Message):
r"""Single entry in a CoordinatesValue.
Fields:
entry: A number attribute.
"""
entry = _messages.FloatField(1, repeated=True)
coordinates = _messages.MessageField('CoordinatesValueListEntry', 1, repeated=True)
type = _messages.StringField(2, default=u'LineString')
class LineStyle(_messages.Message):
r"""Represents a LineStyle within a StyleSetting
Fields:
strokeColor: Color of the line in #RRGGBB format.
strokeColorStyler: Column-value, gradient or buckets styler that is used
to determine the line color and opacity.
strokeOpacity: Opacity of the line : 0.0 (transparent) to 1.0 (opaque).
strokeWeight: Width of the line in pixels.
strokeWeightStyler: Column-value or bucket styler that is used to
determine the width of the line.
"""
strokeColor = _messages.StringField(1)
strokeColorStyler = _messages.MessageField('StyleFunction', 2)
strokeOpacity = _messages.FloatField(3)
strokeWeight = _messages.IntegerField(4, variant=_messages.Variant.INT32)
strokeWeightStyler = _messages.MessageField('StyleFunction', 5)
class Point(_messages.Message):
r"""Represents a point object.
Fields:
coordinates: The coordinates that define the point.
type: Point: A point geometry.
"""
coordinates = _messages.FloatField(1, repeated=True)
type = _messages.StringField(2, default=u'Point')
class PointStyle(_messages.Message):
r"""Represents a PointStyle within a StyleSetting
Fields:
iconName: Name of the icon. Use values defined in
http://www.google.com/fusiontables/DataSource?dsrcid=308519
iconStyler: Column or a bucket value from which the icon name is to be
determined.
"""
iconName = _messages.StringField(1)
iconStyler = _messages.MessageField('StyleFunction', 2)
class Polygon(_messages.Message):
r"""Represents a polygon object.
Messages:
CoordinatesValueListEntry: Single entry in a CoordinatesValue.
Fields:
coordinates: The coordinates that define the polygon.
type: Type: A polygon geometry.
"""
class CoordinatesValueListEntry(_messages.Message):
r"""Single entry in a CoordinatesValue.
Messages:
EntryValueListEntry: Single entry in a EntryValue.
Fields:
entry: A EntryValueListEntry attribute.
"""
class EntryValueListEntry(_messages.Message):
r"""Single entry in a EntryValue.
Fields:
entry: A number attribute.
"""
entry = _messages.FloatField(1, repeated=True)
entry = _messages.MessageField('EntryValueListEntry', 1, repeated=True)
coordinates = _messages.MessageField('CoordinatesValueListEntry', 1, repeated=True)
type = _messages.StringField(2, default=u'Polygon')
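# Construction sketch for the doubly nested Polygon coordinates: a polygon is
# a list of rings, each ring a list of coordinate pairs, encoded through the
# generated CoordinatesValueListEntry/EntryValueListEntry wrappers. The
# [lng, lat] ordering shown here is an assumption in the GeoJSON style.
_Pair = Polygon.CoordinatesValueListEntry.EntryValueListEntry
_outer_ring = Polygon.CoordinatesValueListEntry(entry=[
    _Pair(entry=[-122.1, 37.4]),
    _Pair(entry=[-122.0, 37.4]),
    _Pair(entry=[-122.0, 37.5]),
    _Pair(entry=[-122.1, 37.4]),   # rings close on the starting point
])
_EXAMPLE_POLYGON = Polygon(coordinates=[_outer_ring])  # type defaults to 'Polygon'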
class PolygonStyle(_messages.Message):
r"""Represents a PolygonStyle within a StyleSetting
Fields:
fillColor: Color of the interior of the polygon in #RRGGBB format.
fillColorStyler: Column-value, gradient, or bucket styler that is used to
determine the interior color and opacity of the polygon.
fillOpacity: Opacity of the interior of the polygon: 0.0 (transparent) to
1.0 (opaque).
strokeColor: Color of the polygon border in #RRGGBB format.
strokeColorStyler: Column-value, gradient or buckets styler that is used
to determine the border color and opacity.
strokeOpacity: Opacity of the polygon border: 0.0 (transparent) to 1.0
(opaque).
    strokeWeight: Width of the polygon border in pixels.
strokeWeightStyler: Column-value or bucket styler that is used to
determine the width of the polygon border.
"""
fillColor = _messages.StringField(1)
fillColorStyler = _messages.MessageField('StyleFunction', 2)
fillOpacity = _messages.FloatField(3)
strokeColor = _messages.StringField(4)
strokeColorStyler = _messages.MessageField('StyleFunction', 5)
strokeOpacity = _messages.FloatField(6)
strokeWeight = _messages.IntegerField(7, variant=_messages.Variant.INT32)
strokeWeightStyler = _messages.MessageField('StyleFunction', 8)
class Sqlresponse(_messages.Message):
r"""Represents a response to an sql statement.
Messages:
RowsValueListEntry: Single entry in a RowsValue.
Fields:
columns: Columns in the table.
kind: Type name: a template for an individual table.
rows: The rows in the table. For each cell we print out whatever cell
value (e.g., numeric, string) exists. Thus it is important that each
cell contains only one value.
"""
class RowsValueListEntry(_messages.Message):
r"""Single entry in a RowsValue.
Fields:
entry: A extra_types.JsonValue attribute.
"""
entry = _messages.MessageField('extra_types.JsonValue', 1, repeated=True)
columns = _messages.StringField(1, repeated=True)
kind = _messages.StringField(2, default=u'fusiontables#sqlresponse')
rows = _messages.MessageField('RowsValueListEntry', 3, repeated=True)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for the response.
Values:
csv: Responses with Content-Type of text/csv
json: Responses with Content-Type of application/json
"""
csv = 0
json = 1
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class StyleFunction(_messages.Message):
r"""Represents a StyleFunction within a StyleSetting
Messages:
GradientValue: Gradient function that interpolates a range of colors based
on column value.
Fields:
buckets: Bucket function that assigns a style based on the range a column
value falls into.
columnName: Name of the column whose value is used in the style.
gradient: Gradient function that interpolates a range of colors based on
column value.
kind: Stylers can be one of three kinds: "fusiontables#fromColumn" if the
column value is to be used as is, i.e., the column values can have
colors in #RRGGBBAA format or integer line widths or icon names;
"fusiontables#gradient" if the styling of the row is to be based on
applying the gradient function on the column value; or
"fusiontables#buckets" if the styling is to based on the bucket into
which the the column value falls.
"""
class GradientValue(_messages.Message):
r"""Gradient function that interpolates a range of colors based on column
value.
Messages:
ColorsValueListEntry: A ColorsValueListEntry object.
Fields:
colors: Array with two or more colors.
max: Higher-end of the interpolation range: rows with this value will be
assigned to colors[n-1].
min: Lower-end of the interpolation range: rows with this value will be
assigned to colors[0].
"""
class ColorsValueListEntry(_messages.Message):
r"""A ColorsValueListEntry object.
Fields:
color: Color in #RRGGBB format.
opacity: Opacity of the color: 0.0 (transparent) to 1.0 (opaque).
"""
color = _messages.StringField(1)
opacity = _messages.FloatField(2)
colors = _messages.MessageField('ColorsValueListEntry', 1, repeated=True)
max = _messages.FloatField(2)
min = _messages.FloatField(3)
buckets = _messages.MessageField('Bucket', 1, repeated=True)
columnName = _messages.StringField(2)
gradient = _messages.MessageField('GradientValue', 3)
kind = _messages.StringField(4)
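# Sketch of a gradient styler following the kind values documented above:
# rows are colored by interpolating between two colors over the value range
# of one column. All field names come from the GradientValue and
# ColorsValueListEntry definitions in this class.
_EXAMPLE_GRADIENT_STYLER = StyleFunction(
    columnName='population',
    kind='fusiontables#gradient',
    gradient=StyleFunction.GradientValue(
        min=0.0,
        max=1000000.0,
        colors=[
            StyleFunction.GradientValue.ColorsValueListEntry(
                color='#0000FF', opacity=0.5),   # assigned near min
            StyleFunction.GradientValue.ColorsValueListEntry(
                color='#FF0000', opacity=1.0),   # assigned near max
        ]))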
class StyleSetting(_messages.Message):
r"""Represents a complete StyleSettings object. The primary key is a
combination of the tableId and a styleId.
Fields:
kind: Type name: an individual style setting. A StyleSetting contains the
    style definitions for points, lines, and polygons in a table. Since a
table can have any one or all of them, a style definition can have
point, line and polygon style definitions.
markerOptions: Style definition for points in the table.
name: Optional name for the style setting.
polygonOptions: Style definition for polygons in the table.
polylineOptions: Style definition for lines in the table.
styleId: Identifier for the style setting (unique only within tables).
tableId: Identifier for the table.
"""
kind = _messages.StringField(1, default=u'fusiontables#styleSetting')
markerOptions = _messages.MessageField('PointStyle', 2)
name = _messages.StringField(3)
polygonOptions = _messages.MessageField('PolygonStyle', 4)
polylineOptions = _messages.MessageField('LineStyle', 5)
styleId = _messages.IntegerField(6, variant=_messages.Variant.INT32)
tableId = _messages.StringField(7)
class StyleSettingList(_messages.Message):
r"""Represents a list of styles for a given table.
Fields:
items: All requested style settings.
kind: Type name: in this case, a list of style settings.
nextPageToken: Token used to access the next page of this result. No token
is displayed if there are no more pages left.
totalItems: Total number of styles for the table.
"""
items = _messages.MessageField('StyleSetting', 1, repeated=True)
kind = _messages.StringField(2, default=u'fusiontables#styleSettingList')
nextPageToken = _messages.StringField(3)
totalItems = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class Table(_messages.Message):
r"""Represents a table. Specifies the name, whether it is exportable,
description, attribution, and attribution link.
Fields:
attribution: Optional attribution assigned to the table.
attributionLink: Optional link for attribution.
baseTableIds: Optional base table identifier if this table is a view or
merged table.
columns: Columns in the table.
description: Optional description assigned to the table.
isExportable: Variable for whether table is exportable.
kind: Type name: a template for an individual table.
name: Name assigned to a table.
sql: Optional sql that encodes the table definition for derived tables.
tableId: Encrypted unique alphanumeric identifier for the table.
"""
attribution = _messages.StringField(1)
attributionLink = _messages.StringField(2)
baseTableIds = _messages.StringField(3, repeated=True)
columns = _messages.MessageField('Column', 4, repeated=True)
description = _messages.StringField(5)
isExportable = _messages.BooleanField(6)
kind = _messages.StringField(7, default=u'fusiontables#table')
name = _messages.StringField(8)
sql = _messages.StringField(9)
tableId = _messages.StringField(10)
class TableList(_messages.Message):
r"""Represents a list of tables.
Fields:
items: List of all requested tables.
kind: Type name: a list of all tables.
nextPageToken: Token used to access the next page of this result. No token
is displayed if there are no more pages left.
"""
items = _messages.MessageField('Table', 1, repeated=True)
kind = _messages.StringField(2, default=u'fusiontables#tableList')
nextPageToken = _messages.StringField(3)
class Task(_messages.Message):
r"""Specifies the identifier, name, and type of a task in a table.
Fields:
kind: Type of the resource. This is always "fusiontables#task".
progress: An indication of task progress.
started: false while the table is busy with some other task. true if this
background task is currently running.
taskId: Identifier for the task.
type: Type of background task. One of DELETE_ROWS Deletes one or more
      rows from the table. ADD_ROWS Adds one or more rows to a table.
Includes importing data into a new table and importing more rows into an
existing table. ADD_COLUMN Adds a new column to the table. CHANGE_TYPE
Changes the type of a column.
"""
kind = _messages.StringField(1, default=u'fusiontables#task')
progress = _messages.StringField(2)
started = _messages.BooleanField(3)
taskId = _messages.IntegerField(4)
type = _messages.StringField(5)
class TaskList(_messages.Message):
r"""Represents a list of tasks for a table.
Fields:
items: List of all requested tasks.
kind: Type of the resource. This is always "fusiontables#taskList".
nextPageToken: Token used to access the next page of this result. No token
is displayed if there are no more pages left.
totalItems: Total number of tasks for the table.
"""
items = _messages.MessageField('Task', 1, repeated=True)
kind = _messages.StringField(2, default=u'fusiontables#taskList')
nextPageToken = _messages.StringField(3)
totalItems = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class Template(_messages.Message):
r"""Represents the contents of InfoWindow templates.
Fields:
automaticColumnNames: List of columns from which the template is to be
automatically constructed. Only one of body or automaticColumns can be
specified.
body: Body of the template. It contains HTML with {column_name} to insert
values from a particular column. The body is sanitized to remove certain
tags, e.g., script. Only one of body or automaticColumns can be
specified.
kind: Type name: a template for the info window contents. The template can
either include an HTML body or a list of columns from which the template
is computed automatically.
name: Optional name assigned to a template.
tableId: Identifier for the table for which the template is defined.
templateId: Identifier for the template, unique within the context of a
particular table.
"""
automaticColumnNames = _messages.StringField(1, repeated=True)
body = _messages.StringField(2)
kind = _messages.StringField(3, default=u'fusiontables#template')
name = _messages.StringField(4)
tableId = _messages.StringField(5)
templateId = _messages.IntegerField(6, variant=_messages.Variant.INT32)
class TemplateList(_messages.Message):
r"""Represents a list of templates for a given table.
Fields:
items: List of all requested templates.
kind: Type name: a list of all templates.
nextPageToken: Token used to access the next page of this result. No token
is displayed if there are no more pages left.
totalItems: Total number of templates for the table.
"""
items = _messages.MessageField('Template', 1, repeated=True)
kind = _messages.StringField(2, default=u'fusiontables#templateList')
nextPageToken = _messages.StringField(3)
totalItems = _messages.IntegerField(4, variant=_messages.Variant.INT32)
# ==== end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/samples/fusiontables_sample/fusiontables_v1/fusiontables_v1_messages.py (source: pypi) ====
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'bigquery'
class BigqueryDatasetsDeleteRequest(_messages.Message):
r"""A BigqueryDatasetsDeleteRequest object.
Fields:
datasetId: Dataset ID of dataset being deleted
deleteContents: If True, delete all the tables in the dataset. If False
and the dataset contains tables, the request will fail. Default is False
projectId: Project ID of the dataset being deleted
"""
datasetId = _messages.StringField(1, required=True)
deleteContents = _messages.BooleanField(2)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsDeleteResponse(_messages.Message):
r"""An empty BigqueryDatasetsDelete response."""
class BigqueryDatasetsGetRequest(_messages.Message):
r"""A BigqueryDatasetsGetRequest object.
Fields:
datasetId: Dataset ID of the requested dataset
projectId: Project ID of the requested dataset
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsInsertRequest(_messages.Message):
r"""A BigqueryDatasetsInsertRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
projectId: Project ID of the new dataset
"""
dataset = _messages.MessageField('Dataset', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsListRequest(_messages.Message):
r"""A BigqueryDatasetsListRequest object.
Fields:
all: Whether to list all datasets, including hidden ones
filter: An expression for filtering the results of the request by label.
      The syntax is "labels.<name>[:<value>]". Multiple filters can be ANDed together by
connecting with a space. Example: "labels.department:receiving
labels.active". See https://cloud.google.com/bigquery/docs/labeling-
datasets#filtering_datasets_using_labels for details.
maxResults: The maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the datasets to be listed
"""
all = _messages.BooleanField(1)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
class BigqueryDatasetsPatchRequest(_messages.Message):
r"""A BigqueryDatasetsPatchRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsUpdateRequest(_messages.Message):
r"""A BigqueryDatasetsUpdateRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryJobsCancelRequest(_messages.Message):
r"""A BigqueryJobsCancelRequest object.
Fields:
jobId: [Required] Job ID of the job to cancel
projectId: [Required] Project ID of the job to cancel
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsGetQueryResultsRequest(_messages.Message):
r"""A BigqueryJobsGetQueryResultsRequest object.
Fields:
jobId: [Required] Job ID of the query job
maxResults: Maximum number of results to read
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: [Required] Project ID of the query job
startIndex: Zero-based index of the starting row
timeoutMs: How long to wait for the query to complete, in milliseconds,
before returning. Default is 10 seconds. If the timeout passes before
the job completes, the 'jobComplete' field in the response will be false
"""
jobId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
timeoutMs = _messages.IntegerField(6, variant=_messages.Variant.UINT32)
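# Polling sketch built on the timeoutMs/jobComplete contract in the docstring
# above: each call blocks up to timeoutMs, and jobComplete=False means try
# again. `client.jobs.GetQueryResults` and the response's jobComplete field
# are assumptions about the generated BigQuery client and its response
# message, neither of which is defined in this file.
def _example_wait_for_query(client, project_id, job_id):
    req = BigqueryJobsGetQueryResultsRequest(
        projectId=project_id, jobId=job_id,
        timeoutMs=10000)                # wait up to 10 s per call
    while True:
        resp = client.jobs.GetQueryResults(req)
        if resp.jobComplete:
            return resp                 # rows and schema are now populated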
class BigqueryJobsGetRequest(_messages.Message):
r"""A BigqueryJobsGetRequest object.
Fields:
jobId: [Required] Job ID of the requested job
projectId: [Required] Project ID of the requested job
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsInsertRequest(_messages.Message):
r"""A BigqueryJobsInsertRequest object.
Fields:
job: A Job resource to be passed as the request body.
projectId: Project ID of the project that will be billed for the job
"""
job = _messages.MessageField('Job', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsListRequest(_messages.Message):
r"""A BigqueryJobsListRequest object.
Enums:
ProjectionValueValuesEnum: Restrict information returned to a set of
selected fields
StateFilterValueValuesEnum: Filter for job state
Fields:
allUsers: Whether to display jobs owned by all users in the project.
Default false
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the jobs to list
projection: Restrict information returned to a set of selected fields
stateFilter: Filter for job state
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Restrict information returned to a set of selected fields
Values:
full: Includes all job data
minimal: Does not include the job configuration
"""
full = 0
minimal = 1
class StateFilterValueValuesEnum(_messages.Enum):
r"""Filter for job state
Values:
done: Finished jobs
pending: Pending jobs
running: Running jobs
"""
done = 0
pending = 1
running = 2
allUsers = _messages.BooleanField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
stateFilter = _messages.EnumField('StateFilterValueValuesEnum', 6, repeated=True)
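# stateFilter is a repeated enum, so several job states can be requested at
# once; a minimal request construction sketch (no client call):
_STATES = BigqueryJobsListRequest.StateFilterValueValuesEnum
_EXAMPLE_JOBS_LIST_REQUEST = BigqueryJobsListRequest(
    projectId='my-project',
    allUsers=False,
    stateFilter=[_STATES.pending, _STATES.running])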
class BigqueryJobsQueryRequest(_messages.Message):
r"""A BigqueryJobsQueryRequest object.
Fields:
projectId: Project ID of the project billed for the query
queryRequest: A QueryRequest resource to be passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
queryRequest = _messages.MessageField('QueryRequest', 2)
class BigqueryProjectsListRequest(_messages.Message):
r"""A BigqueryProjectsListRequest object.
Fields:
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
class BigqueryTabledataInsertAllRequest(_messages.Message):
r"""A BigqueryTabledataInsertAllRequest object.
Fields:
datasetId: Dataset ID of the destination table.
projectId: Project ID of the destination table.
tableDataInsertAllRequest: A TableDataInsertAllRequest resource to be
passed as the request body.
tableId: Table ID of the destination table.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableDataInsertAllRequest = _messages.MessageField('TableDataInsertAllRequest', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTabledataListRequest(_messages.Message):
r"""A BigqueryTabledataListRequest object.
Fields:
datasetId: Dataset ID of the table to read
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, identifying the result
set
projectId: Project ID of the table to read
startIndex: Zero-based index of the starting row to read
tableId: Table ID of the table to read
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
tableId = _messages.StringField(6, required=True)
class BigqueryTablesDeleteRequest(_messages.Message):
r"""A BigqueryTablesDeleteRequest object.
Fields:
datasetId: Dataset ID of the table to delete
projectId: Project ID of the table to delete
tableId: Table ID of the table to delete
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesDeleteResponse(_messages.Message):
r"""An empty BigqueryTablesDelete response."""
class BigqueryTablesGetRequest(_messages.Message):
r"""A BigqueryTablesGetRequest object.
Fields:
datasetId: Dataset ID of the requested table
projectId: Project ID of the requested table
tableId: Table ID of the requested table
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesInsertRequest(_messages.Message):
r"""A BigqueryTablesInsertRequest object.
Fields:
datasetId: Dataset ID of the new table
projectId: Project ID of the new table
table: A Table resource to be passed as the request body.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
class BigqueryTablesListRequest(_messages.Message):
r"""A BigqueryTablesListRequest object.
Fields:
datasetId: Dataset ID of the tables to list
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the tables to list
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
class BigqueryTablesPatchRequest(_messages.Message):
r"""A BigqueryTablesPatchRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTablesUpdateRequest(_messages.Message):
r"""A BigqueryTablesUpdateRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigtableColumn(_messages.Message):
r"""A BigtableColumn object.
Fields:
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. 'encoding' can also be set at
the column family level. However, the setting at this level takes
precedence if 'encoding' is set at both levels.
fieldName: [Optional] If the qualifier is not a valid BigQuery field
identifier, i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid
identifier must be provided as the column field name and is used as the
field name in queries.
onlyReadLatest: [Optional] If this is set, only the latest version of
the value in this column is exposed. 'onlyReadLatest' can also be set
at the column family level. However, the setting at this level takes
precedence if 'onlyReadLatest' is set at both levels.
qualifierEncoded: [Required] Qualifier of the column. Columns in the
parent column family that have this exact qualifier are exposed as the
<family field name>.<column field name> field. If the qualifier is a
valid UTF-8 string, it can be specified in the qualifier_string field.
Otherwise, a base-64 encoded value must be set to qualifier_encoded.
The column field name is the same as the column qualifier. However, if
the qualifier is not a valid BigQuery field identifier, i.e. does not
match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as
field_name.
qualifierString: A string attribute.
type: [Optional] The type to convert the value in cells of this column.
The values are expected to be encoded using the HBase Bytes.toBytes
function when using the BINARY encoding value. The following BigQuery
types are allowed (case-sensitive): BYTES, STRING, INTEGER, FLOAT,
BOOLEAN. The default type is BYTES. 'type' can also be set at the
column family level. However, the setting at this level takes
precedence if 'type' is set at both levels.
"""
encoding = _messages.StringField(1)
fieldName = _messages.StringField(2)
onlyReadLatest = _messages.BooleanField(3)
qualifierEncoded = _messages.BytesField(4)
qualifierString = _messages.StringField(5)
type = _messages.StringField(6)
class BigtableColumnFamily(_messages.Message):
r"""A BigtableColumnFamily object.
Fields:
columns: [Optional] Lists of columns that should be exposed as
individual fields as opposed to a list of (column name, value) pairs.
All columns whose qualifier matches a qualifier in this list can be
accessed as <family field name>.<column field name>. Other columns can
be accessed as a list through the <family field name>.Column field.
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. This can be overridden for a
specific column by listing that column in 'columns' and specifying an
encoding for it.
familyId: Identifier of the column family.
onlyReadLatest: [Optional] If this is set, only the latest version of
each value is exposed for all columns in this column family. This can
be overridden for a specific column by listing that column in 'columns'
and specifying a different setting for that column.
type: [Optional] The type to convert the value in cells of this column
family. The values are expected to be encoded using the HBase
Bytes.toBytes function when using the BINARY encoding value. The
following BigQuery types are allowed (case-sensitive): BYTES, STRING,
INTEGER, FLOAT, BOOLEAN. The default type is BYTES. This can be
overridden for a specific column by listing that column in 'columns'
and specifying a type for it.
"""
columns = _messages.MessageField('BigtableColumn', 1, repeated=True)
encoding = _messages.StringField(2)
familyId = _messages.StringField(3)
onlyReadLatest = _messages.BooleanField(4)
type = _messages.StringField(5)
class BigtableOptions(_messages.Message):
r"""A BigtableOptions object.
Fields:
columnFamilies: [Optional] List of column families to expose in the table
schema along with their types. This list restricts the column families
that can be referenced in queries and specifies their value types. You
can use this list to do type conversions - see the 'type' field for more
details. If you leave this list empty, all column families are present
in the table schema and their values are read as BYTES. During a query
only the column families referenced in that query are read from
Bigtable.
ignoreUnspecifiedColumnFamilies: [Optional] If this field is true, then
the column families that are not specified in the columnFamilies list
are not exposed in the table schema. Otherwise, they are read with
BYTES type values. The default value is false.
readRowkeyAsString: [Optional] If this field is true, then the rowkey
column families will be read and converted to string. Otherwise, they
are read with BYTES type values and users need to manually cast them
with CAST if necessary. The default value is false.
"""
columnFamilies = _messages.MessageField('BigtableColumnFamily', 1, repeated=True)
ignoreUnspecifiedColumnFamilies = _messages.BooleanField(2)
readRowkeyAsString = _messages.BooleanField(3)
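# Illustrative usage sketch, not part of the generated API surface: exposing a
# single Bigtable column family with one typed column. Family and qualifier
# names are hypothetical; see the field docstrings above for the
# encoding/type precedence rules.
def _example_build_bigtable_options():
    """Return a BigtableOptions restricting the schema to one column family."""
    column = BigtableColumn(
        qualifierString='temperature',
        type='FLOAT',            # overrides the family-level default of BYTES
        onlyReadLatest=True,
    )
    family = BigtableColumnFamily(
        familyId='measurements',
        encoding='TEXT',
        columns=[column],
    )
    return BigtableOptions(
        columnFamilies=[family],
        ignoreUnspecifiedColumnFamilies=True,
        readRowkeyAsString=True,
    )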
class CsvOptions(_messages.Message):
r"""A CsvOptions object.
Fields:
allowJaggedRows: [Optional] Indicates if BigQuery should accept rows that
are missing trailing optional columns. If true, BigQuery treats missing
trailing columns as null values. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. The default
value is false.
allowQuotedNewlines: [Optional] Indicates if BigQuery should allow quoted
data sections that contain newline characters in a CSV file. The default
value is false.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\\t" to specify a tab
separator. The default value is a comma (',').
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when reading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
encoding = _messages.StringField(3)
fieldDelimiter = _messages.StringField(4)
quote = _messages.StringField(5, default=u'"')
skipLeadingRows = _messages.IntegerField(6)
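# Illustrative usage sketch, not part of the generated API surface: CSV
# parsing options for a pipe-delimited file with one header row. All values
# are hypothetical; defaults follow the field docstrings above.
def _example_build_csv_options():
    """Return CsvOptions for a '|'-delimited file whose first row is a header."""
    return CsvOptions(
        fieldDelimiter='|',
        skipLeadingRows=1,
        allowJaggedRows=False,
        encoding='UTF-8',
    )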
class Dataset(_messages.Message):
r"""A Dataset object.
Messages:
AccessValueListEntry: An AccessValueListEntry object.
LabelsValue: [Experimental] The labels associated with this dataset. You
can use these to organize and group your datasets. You can set this
property when inserting or updating a dataset. Label keys and values can
be no longer than 63 characters, can only contain letters, numeric
characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and must be unique within a dataset. Both keys and values are
additionally constrained to be <= 128 bytes in size.
Fields:
access: [Optional] An array of objects that define dataset access for one
or more entities. You can set this property when inserting or updating a
dataset in order to control who is allowed to access the data. If
unspecified at dataset creation time, BigQuery adds default dataset
access for the following entities: access.specialGroup: projectReaders;
access.role: READER; access.specialGroup: projectWriters; access.role:
WRITER; access.specialGroup: projectOwners; access.role: OWNER;
access.userByEmail: [dataset creator email]; access.role: OWNER;
creationTime: [Output-only] The time when this dataset was created, in
milliseconds since the epoch.
datasetReference: [Required] A reference that identifies the dataset.
defaultTableExpirationMs: [Optional] The default lifetime of all tables in
the dataset, in milliseconds. The minimum value is 3600000 milliseconds
(one hour). Once this property is set, all newly-created tables in the
dataset will have an expirationTime property set to the creation time
plus the value in this property, and changing the value will only affect
new tables, not existing ones. When the expirationTime for a given table
is reached, that table will be deleted automatically. If a table's
expirationTime is modified or removed before the table expires, or if
you provide an explicit expirationTime when creating a table, that value
takes precedence over the default expiration time indicated by this
property.
description: [Optional] A user-friendly description of the dataset.
etag: [Output-only] A hash of the resource.
friendlyName: [Optional] A descriptive name for the dataset.
id: [Output-only] The fully-qualified unique name of the dataset in the
format projectId:datasetId. The dataset name without the project name is
given in the datasetId field. When creating a new dataset, leave this
field blank, and instead specify the datasetId field.
kind: [Output-only] The resource type.
labels: [Experimental] The labels associated with this dataset. You can
use these to organize and group your datasets. You can set this property
when inserting or updating a dataset. Label keys and values can be no
longer than 63 characters, can only contain letters, numeric characters,
underscores and dashes. International characters are allowed. Label
values are optional. Label keys must start with a letter and must be
unique within a dataset. Both keys and values are additionally
constrained to be <= 128 bytes in size.
lastModifiedTime: [Output-only] The date when this dataset or any of its
tables was last modified, in milliseconds since the epoch.
location: [Experimental] The geographic location where the dataset should
reside. Possible values include EU and US. The default value is US.
selfLink: [Output-only] A URL that can be used to access the resource
again. You can use this URL in Get or Update requests to the resource.
"""
class AccessValueListEntry(_messages.Message):
r"""A AccessValueListEntry object.
Fields:
domain: [Pick one] A domain to grant access to. Any users signed in with
the domain specified will be granted the specified access. Example:
"example.com".
groupByEmail: [Pick one] An email address of a Google Group to grant
access to.
role: [Required] Describes the rights granted to the user specified by
the other member of the access object. The following string values are
supported: READER, WRITER, OWNER.
specialGroup: [Pick one] A special group to grant access to. Possible
values include: projectOwners: Owners of the enclosing project.
projectReaders: Readers of the enclosing project. projectWriters:
Writers of the enclosing project. allAuthenticatedUsers: All
authenticated BigQuery users.
userByEmail: [Pick one] An email address of a user to grant access to.
For example: [email protected].
view: [Pick one] A view from a different dataset to grant access to.
Queries executed against that view will have read access to tables in
this dataset. The role field is not required when this field is set.
If that view is updated by any user, access to the view needs to be
granted again via an update operation.
"""
domain = _messages.StringField(1)
groupByEmail = _messages.StringField(2)
role = _messages.StringField(3)
specialGroup = _messages.StringField(4)
userByEmail = _messages.StringField(5)
view = _messages.MessageField('TableReference', 6)
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""[Experimental] The labels associated with this dataset. You can use
these to organize and group your datasets. You can set this property when
inserting or updating a dataset. Label keys and values can be no longer
than 63 characters, can only contain letters, numeric characters,
underscores and dashes. International characters are allowed. Label values
are optional. Label keys must start with a letter and must be unique
within a dataset. Both keys and values are additionally constrained to be
<= 128 bytes in size.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
access = _messages.MessageField('AccessValueListEntry', 1, repeated=True)
creationTime = _messages.IntegerField(2)
datasetReference = _messages.MessageField('DatasetReference', 3)
defaultTableExpirationMs = _messages.IntegerField(4)
description = _messages.StringField(5)
etag = _messages.StringField(6)
friendlyName = _messages.StringField(7)
id = _messages.StringField(8)
kind = _messages.StringField(9, default=u'bigquery#dataset')
labels = _messages.MessageField('LabelsValue', 10)
lastModifiedTime = _messages.IntegerField(11)
location = _messages.StringField(12)
selfLink = _messages.StringField(13)
class DatasetList(_messages.Message):
r"""A DatasetList object.
Messages:
DatasetsValueListEntry: A DatasetsValueListEntry object.
Fields:
datasets: An array of the dataset resources in the project. Each resource
contains basic information. For full information about a particular
dataset resource, use the Datasets: get method. This property is omitted
when there are no datasets in the project.
etag: A hash value of the results page. You can use this property to
determine if the page has changed since the last request.
kind: The list type. This property always returns the value
"bigquery#datasetList".
nextPageToken: A token that can be used to request the next results page.
This property is omitted on the final results page.
"""
class DatasetsValueListEntry(_messages.Message):
r"""A DatasetsValueListEntry object.
Messages:
LabelsValue: [Experimental] The labels associated with this dataset. You
can use these to organize and group your datasets.
Fields:
datasetReference: The dataset reference. Use this property to access
specific parts of the dataset's ID, such as project ID or dataset ID.
friendlyName: A descriptive name for the dataset, if one exists.
id: The fully-qualified, unique, opaque ID of the dataset.
kind: The resource type. This property always returns the value
"bigquery#dataset".
labels: [Experimental] The labels associated with this dataset. You can
use these to organize and group your datasets.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""[Experimental] The labels associated with this dataset. You can use
these to organize and group your datasets.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
datasetReference = _messages.MessageField('DatasetReference', 1)
friendlyName = _messages.StringField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default=u'bigquery#dataset')
labels = _messages.MessageField('LabelsValue', 5)
datasets = _messages.MessageField('DatasetsValueListEntry', 1, repeated=True)
etag = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#datasetList')
nextPageToken = _messages.StringField(4)
class DatasetReference(_messages.Message):
r"""A DatasetReference object.
Fields:
datasetId: [Required] A unique ID for this dataset, without the project
name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or
underscores (_). The maximum length is 1,024 characters.
projectId: [Optional] The ID of the project containing this dataset.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
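# Illustrative usage sketch, not part of the generated API surface: a minimal
# Dataset resource for a datasets.insert call. IDs are hypothetical, and the
# [Output-only] fields documented above are deliberately left unset.
def _example_build_dataset():
    """Return a Dataset whose tables expire one day after creation."""
    return Dataset(
        datasetReference=DatasetReference(
            projectId='my-project',
            datasetId='my_dataset',
        ),
        friendlyName='Example dataset',
        # One day; must be at least the documented minimum of 3600000 ms.
        defaultTableExpirationMs=24 * 60 * 60 * 1000,
    )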
class ErrorProto(_messages.Message):
r"""A ErrorProto object.
Fields:
debugInfo: Debugging information. This property is internal to Google and
should not be used.
location: Specifies where the error occurred, if present.
message: A human-readable description of the error.
reason: A short error code that summarizes the error.
"""
debugInfo = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
reason = _messages.StringField(4)
class ExplainQueryStage(_messages.Message):
r"""A ExplainQueryStage object.
Fields:
computeRatioAvg: Relative amount of time the average shard spent on CPU-
bound tasks.
computeRatioMax: Relative amount of time the slowest shard spent on CPU-
bound tasks.
id: Unique ID for stage within plan.
name: Human-readable name for stage.
readRatioAvg: Relative amount of time the average shard spent reading
input.
readRatioMax: Relative amount of time the slowest shard spent reading
input.
recordsRead: Number of records read into the stage.
recordsWritten: Number of records written by the stage.
steps: List of operations within the stage in dependency order
(approximately chronological).
waitRatioAvg: Relative amount of time the average shard spent waiting to
be scheduled.
waitRatioMax: Relative amount of time the slowest shard spent waiting to
be scheduled.
writeRatioAvg: Relative amount of time the average shard spent on writing
output.
writeRatioMax: Relative amount of time the slowest shard spent on writing
output.
"""
computeRatioAvg = _messages.FloatField(1)
computeRatioMax = _messages.FloatField(2)
id = _messages.IntegerField(3)
name = _messages.StringField(4)
readRatioAvg = _messages.FloatField(5)
readRatioMax = _messages.FloatField(6)
recordsRead = _messages.IntegerField(7)
recordsWritten = _messages.IntegerField(8)
steps = _messages.MessageField('ExplainQueryStep', 9, repeated=True)
waitRatioAvg = _messages.FloatField(10)
waitRatioMax = _messages.FloatField(11)
writeRatioAvg = _messages.FloatField(12)
writeRatioMax = _messages.FloatField(13)
class ExplainQueryStep(_messages.Message):
r"""A ExplainQueryStep object.
Fields:
kind: Machine-readable operation type.
substeps: Human-readable stage descriptions.
"""
kind = _messages.StringField(1)
substeps = _messages.StringField(2, repeated=True)
class ExternalDataConfiguration(_messages.Message):
r"""A ExternalDataConfiguration object.
Fields:
autodetect: [Experimental] Try to detect schema and format options
automatically. Any option specified explicitly will be honored.
bigtableOptions: [Optional] Additional options if sourceFormat is set to
BIGTABLE.
compression: [Optional] The compression type of the data source. Possible
values include GZIP and NONE. The default value is NONE. This setting is
ignored for Google Cloud Bigtable, Google Cloud Datastore backups and
Avro formats.
csvOptions: Additional properties to set if sourceFormat is set to CSV.
googleSheetsOptions: [Optional] Additional options if sourceFormat is set
to GOOGLE_SHEETS.
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore
backups: This setting is ignored. Avro: This setting is ignored.
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when reading data. If the number of bad records exceeds this
value, an invalid error is returned in the job result. The default value
is 0, which requires that all records are valid. This setting is ignored
for Google Cloud Bigtable, Google Cloud Datastore backups and Avro
formats.
schema: [Optional] The schema for the data. Schema is required for CSV and
JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud
Datastore backups, and Avro formats.
sourceFormat: [Required] The data format. For CSV files, specify "CSV".
For Google sheets, specify "GOOGLE_SHEETS". For newline-delimited JSON,
specify "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". For
Google Cloud Datastore backups, specify "DATASTORE_BACKUP".
[Experimental] For Google Cloud Bigtable, specify "BIGTABLE". Please
note that reading from Google Cloud Bigtable is experimental and has to
be enabled for your project. Please contact Google Cloud Support to
enable this for your project.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud. For Google Cloud Storage URIs: Each URI can contain one
'*' wildcard character and it must come after the 'bucket' name. Size
limits related to load jobs apply to external data sources. For Google
Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a
fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
For Google Cloud Datastore backups, exactly one URI can be specified,
and it must end with '.backup_info'. Also, the '*' wildcard character is
not allowed.
"""
autodetect = _messages.BooleanField(1)
bigtableOptions = _messages.MessageField('BigtableOptions', 2)
compression = _messages.StringField(3)
csvOptions = _messages.MessageField('CsvOptions', 4)
googleSheetsOptions = _messages.MessageField('GoogleSheetsOptions', 5)
ignoreUnknownValues = _messages.BooleanField(6)
maxBadRecords = _messages.IntegerField(7, variant=_messages.Variant.INT32)
schema = _messages.MessageField('TableSchema', 8)
sourceFormat = _messages.StringField(9)
sourceUris = _messages.StringField(10, repeated=True)
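# Illustrative usage sketch, not part of the generated API surface: an
# external CSV table backed by Cloud Storage. The bucket path is hypothetical;
# TableSchema is defined elsewhere in this module, so the schema is taken as a
# parameter rather than built here.
def _example_build_external_csv_config(schema):
    """Return an ExternalDataConfiguration for CSV files under a GCS prefix."""
    return ExternalDataConfiguration(
        sourceFormat='CSV',
        # One '*' wildcard is allowed, and it must come after the bucket name.
        sourceUris=['gs://my-bucket/data/*.csv'],
        csvOptions=CsvOptions(skipLeadingRows=1),
        maxBadRecords=10,
        schema=schema,
    )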
class GetQueryResultsResponse(_messages.Message):
r"""A GetQueryResultsResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
etag: A hash of this response.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the BigQuery Job that was created to run the
query. This field will be present even if the original request timed
out, in which case GetQueryResults can be used to read the results once
the query has completed. Since this API only returns the first page of
results, subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type of the response.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above. Present
only when the query completes successfully.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results. Present only when the query completes successfully.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
etag = _messages.StringField(3)
jobComplete = _messages.BooleanField(4)
jobReference = _messages.MessageField('JobReference', 5)
kind = _messages.StringField(6, default=u'bigquery#getQueryResultsResponse')
numDmlAffectedRows = _messages.IntegerField(7)
pageToken = _messages.StringField(8)
rows = _messages.MessageField('TableRow', 9, repeated=True)
schema = _messages.MessageField('TableSchema', 10)
totalBytesProcessed = _messages.IntegerField(11)
totalRows = _messages.IntegerField(12, variant=_messages.Variant.UINT64)
class GoogleSheetsOptions(_messages.Message):
r"""A GoogleSheetsOptions object.
Fields:
skipLeadingRows: [Optional] The number of rows at the top of a sheet that
BigQuery will skip when reading the data. The default value is 0. This
property is useful if you have header rows that should be skipped. When
autodetect is on, behavior is the following: * skipLeadingRows
unspecified - Autodetect tries to detect headers in the first row. If
they are not detected, the row is read as data. Otherwise data is read
starting from the second row. * skipLeadingRows is 0 - Instructs
autodetect that there are no headers and data should be read starting
from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1
rows and tries to detect headers in row N. If headers are not detected,
row N is just skipped. Otherwise row N is used to extract column names
for the detected schema.
"""
skipLeadingRows = _messages.IntegerField(1)
class Job(_messages.Message):
r"""A Job object.
Fields:
configuration: [Required] Describes the job configuration.
etag: [Output-only] A hash of this resource.
id: [Output-only] Opaque ID field of the job
jobReference: [Optional] Reference describing the unique-per-user name of
the job.
kind: [Output-only] The type of the resource.
selfLink: [Output-only] A URL that can be used to access this resource
again.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Output-only] The status of this job. Examine this value when
polling an asynchronous job to see if the job is complete.
user_email: [Output-only] Email address of the user who ran the job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
etag = _messages.StringField(2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
selfLink = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
class JobCancelResponse(_messages.Message):
r"""A JobCancelResponse object.
Fields:
job: The final state of the job.
kind: The resource type of the response.
"""
job = _messages.MessageField('Job', 1)
kind = _messages.StringField(2, default=u'bigquery#jobCancelResponse')
class JobConfiguration(_messages.Message):
r"""A JobConfiguration object.
Fields:
copy: [Pick one] Copies a table.
dryRun: [Optional] If set, don't actually run this job. A valid query will
return a mostly empty response with some processing statistics, while an
invalid query will return the same error it would if it wasn't a dry
run. Behavior of non-query jobs is undefined.
extract: [Pick one] Configures an extract job.
load: [Pick one] Configures a load job.
query: [Pick one] Configures a query job.
"""
copy = _messages.MessageField('JobConfigurationTableCopy', 1)
dryRun = _messages.BooleanField(2)
extract = _messages.MessageField('JobConfigurationExtract', 3)
load = _messages.MessageField('JobConfigurationLoad', 4)
query = _messages.MessageField('JobConfigurationQuery', 5)
class JobConfigurationExtract(_messages.Message):
r"""A JobConfigurationExtract object.
Fields:
compression: [Optional] The compression type to use for exported files.
Possible values include GZIP and NONE. The default value is NONE.
destinationFormat: [Optional] The exported file format. Possible values
include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV.
Tables with nested or repeated fields cannot be exported as CSV.
destinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
passing only one URI as necessary. The fully-qualified Google Cloud
Storage URI where the extracted table should be written.
destinationUris: [Pick one] A list of fully-qualified Google Cloud Storage
URIs where the extracted table should be written.
fieldDelimiter: [Optional] Delimiter to use between fields in the exported
data. Default is ','
printHeader: [Optional] Whether to print out a header row in the results.
Default is true.
sourceTable: [Required] A reference to the table being exported.
"""
compression = _messages.StringField(1)
destinationFormat = _messages.StringField(2)
destinationUri = _messages.StringField(3)
destinationUris = _messages.StringField(4, repeated=True)
fieldDelimiter = _messages.StringField(5)
printHeader = _messages.BooleanField(6, default=True)
sourceTable = _messages.MessageField('TableReference', 7)
class JobConfigurationLoad(_messages.Message):
r"""A JobConfigurationLoad object.
Fields:
allowJaggedRows: [Optional] Accept rows that are missing trailing optional
columns. The missing values are treated as nulls. If false, records with
missing trailing columns are treated as bad records, and if there are
too many bad records, an invalid error is returned in the job result.
The default value is false. Only applicable to CSV, ignored for other
formats.
allowQuotedNewlines: Indicates if BigQuery should allow quoted data
sections that contain newline characters in a CSV file. The default
value is false.
autodetect: [Experimental] Indicates if we should automatically infer the
options and schema for CSV and JSON sources.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table to load the data into.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file. The
separator can be any ISO-8859-1 single-byte character. To use a
character in the range 128-255, you must encode the character as UTF-8.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\t" to specify a tab
separator. The default value is a comma (',').
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when running the job. If the number of bad records exceeds
this value, an invalid error is returned in the job result. The default
value is 0, which requires that all records are valid.
projectionFields: [Experimental] If sourceFormat is set to
"DATASTORE_BACKUP", indicates which entity properties to load into
BigQuery from a Cloud Datastore backup. Property names are case
sensitive and must be top-level properties. If no properties are
specified, BigQuery loads all properties. If any named property isn't
found in the Cloud Datastore backup, an invalid error is returned in the
job result.
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
schema: [Optional] The schema for the destination table. The schema can be
omitted if the destination table already exists, or if you're loading
data from Google Cloud Datastore.
schemaInline: [Deprecated] The inline schema. For CSV schemas, specify as
"Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER,
baz:FLOAT".
schemaInlineFormat: [Deprecated] The format of the schemaInline property.
schemaUpdateOptions: [Experimental] Allows the schema of the destination
table to be updated as a side effect of the load job. Schema update
options are supported in two cases: when writeDisposition is
WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the
destination table is a partition of a table, specified by partition
decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
schema. One or more of the following values are specified:
ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
schema to nullable.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when loading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
sourceFormat: [Optional] The format of the data files. For CSV files,
specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro,
specify "AVRO". The default value is CSV.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud Storage. Each URI can contain one '*' wildcard character
and it must come after the 'bucket' name.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_APPEND. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
autodetect = _messages.BooleanField(3)
createDisposition = _messages.StringField(4)
destinationTable = _messages.MessageField('TableReference', 5)
encoding = _messages.StringField(6)
fieldDelimiter = _messages.StringField(7)
ignoreUnknownValues = _messages.BooleanField(8)
maxBadRecords = _messages.IntegerField(9, variant=_messages.Variant.INT32)
projectionFields = _messages.StringField(10, repeated=True)
quote = _messages.StringField(11, default=u'"')
schema = _messages.MessageField('TableSchema', 12)
schemaInline = _messages.StringField(13)
schemaInlineFormat = _messages.StringField(14)
schemaUpdateOptions = _messages.StringField(15, repeated=True)
skipLeadingRows = _messages.IntegerField(16, variant=_messages.Variant.INT32)
sourceFormat = _messages.StringField(17)
sourceUris = _messages.StringField(18, repeated=True)
writeDisposition = _messages.StringField(19)
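# Illustrative usage sketch, not part of the generated API surface: a CSV load
# configuration that appends to an existing table. IDs and paths are
# hypothetical; TableReference is assumed to be defined later in this module
# with projectId/datasetId/tableId fields.
def _example_build_load_config():
    """Return a JobConfigurationLoad appending CSV data from Cloud Storage."""
    return JobConfigurationLoad(
        sourceUris=['gs://my-bucket/exports/*.csv'],
        sourceFormat='CSV',
        skipLeadingRows=1,
        destinationTable=TableReference(
            projectId='my-project',
            datasetId='my_dataset',
            tableId='my_table',
        ),
        createDisposition='CREATE_IF_NEEDED',
        writeDisposition='WRITE_APPEND',
    )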
class JobConfigurationQuery(_messages.Message):
r"""A JobConfigurationQuery object.
Messages:
TableDefinitionsValue: [Optional] If querying an external data source
outside of BigQuery, describes the data format, location and other
properties of the data source. By defining these properties, the data
source can then be queried as if it were a standard BigQuery table.
Fields:
allowLargeResults: If true, allows the query to produce arbitrarily large
result tables at a slight cost in performance. Requires destinationTable
to be set.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
defaultDataset: [Optional] Specifies the default dataset to use for
unqualified table names in the query.
destinationTable: [Optional] Describes the table where the query results
should be stored. If not present, a new table will be created to store
the results.
flattenResults: [Optional] Flattens all nested and repeated fields in the
query results. The default value is true. allowLargeResults must be true
if this is set to false.
maximumBillingTier: [Optional] Limits the billing tier for this job.
Queries that have resource usage beyond this tier will fail (without
incurring a charge). If unspecified, this will be set to your project
default.
maximumBytesBilled: [Optional] Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail (without
incurring a charge). If unspecified, this will be set to your project
default.
preserveNulls: [Deprecated] This property is deprecated.
priority: [Optional] Specifies a priority for the query. Possible values
include INTERACTIVE and BATCH. The default value is INTERACTIVE.
query: [Required] BigQuery SQL query to execute.
schemaUpdateOptions: [Experimental] Allows the schema of the destination
table to be updated as a side effect of the query job. Schema update
options are supported in two cases: when writeDisposition is
WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the
destination table is a partition of a table, specified by partition
decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
schema. One or more of the following values are specified:
ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
schema to nullable.
tableDefinitions: [Optional] If querying an external data source outside
of BigQuery, describes the data format, location and other properties of
the data source. By defining these properties, the data source can then
be queried as if it were a standard BigQuery table.
useLegacySql: [Experimental] Specifies whether to use BigQuery's legacy
SQL dialect for this query. The default value is true. If set to false,
the query will use BigQuery's standard SQL:
https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is
set to false, the values of allowLargeResults and flattenResults are
ignored; query will be run as if allowLargeResults is true and
flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. Moreover, the query cache is
only available when a query does not have a destination table specified.
The default value is true.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class TableDefinitionsValue(_messages.Message):
r"""[Optional] If querying an external data source outside of BigQuery,
describes the data format, location and other properties of the data
source. By defining these properties, the data source can then be queried
as if it were a standard BigQuery table.
Messages:
AdditionalProperty: An additional property for a TableDefinitionsValue
object.
Fields:
additionalProperties: Additional properties of type
TableDefinitionsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a TableDefinitionsValue object.
Fields:
key: Name of the additional property.
value: An ExternalDataConfiguration attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('ExternalDataConfiguration', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
allowLargeResults = _messages.BooleanField(1)
createDisposition = _messages.StringField(2)
defaultDataset = _messages.MessageField('DatasetReference', 3)
destinationTable = _messages.MessageField('TableReference', 4)
flattenResults = _messages.BooleanField(5, default=True)
maximumBillingTier = _messages.IntegerField(6, variant=_messages.Variant.INT32, default=1)
maximumBytesBilled = _messages.IntegerField(7)
preserveNulls = _messages.BooleanField(8)
priority = _messages.StringField(9)
query = _messages.StringField(10)
schemaUpdateOptions = _messages.StringField(11, repeated=True)
tableDefinitions = _messages.MessageField('TableDefinitionsValue', 12)
useLegacySql = _messages.BooleanField(13)
useQueryCache = _messages.BooleanField(14, default=True)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 15, repeated=True)
writeDisposition = _messages.StringField(16)
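# Illustrative usage sketch, not part of the generated API surface: a
# standard-SQL query job writing to a destination table. Per the docstring
# above, allowLargeResults and flattenResults are ignored when
# useLegacySql=False. Table names are hypothetical.
def _example_build_query_config():
    """Return a JobConfigurationQuery that truncates its destination table."""
    return JobConfigurationQuery(
        query='SELECT name, number FROM `my-project.my_dataset.names`',
        useLegacySql=False,
        destinationTable=TableReference(
            projectId='my-project',
            datasetId='my_dataset',
            tableId='names_copy',
        ),
        writeDisposition='WRITE_TRUNCATE',
        priority='BATCH',
    )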
class JobConfigurationTableCopy(_messages.Message):
r"""A JobConfigurationTableCopy object.
Fields:
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table
sourceTable: [Pick one] Source table to copy.
sourceTables: [Pick one] Source tables to copy.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
createDisposition = _messages.StringField(1)
destinationTable = _messages.MessageField('TableReference', 2)
sourceTable = _messages.MessageField('TableReference', 3)
sourceTables = _messages.MessageField('TableReference', 4, repeated=True)
writeDisposition = _messages.StringField(5)
class JobList(_messages.Message):
r"""A JobList object.
Messages:
JobsValueListEntry: A JobsValueListEntry object.
Fields:
etag: A hash of this page of results.
jobs: List of jobs that were requested.
kind: The resource type of the response.
nextPageToken: A token to request the next page of results.
"""
class JobsValueListEntry(_messages.Message):
r"""A JobsValueListEntry object.
Fields:
configuration: [Full-projection-only] Specifies the job configuration.
errorResult: A result object that will be present only if the job has
failed.
id: Unique opaque ID of the job.
jobReference: Job reference uniquely identifying the job.
kind: The resource type.
state: Running state of the job. When the state is DONE, errorResult can
be checked to determine whether the job succeeded or failed.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Full-projection-only] Describes the state of the job.
user_email: [Full-projection-only] Email address of the user who ran the
job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
errorResult = _messages.MessageField('ErrorProto', 2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
state = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
etag = _messages.StringField(1)
jobs = _messages.MessageField('JobsValueListEntry', 2, repeated=True)
kind = _messages.StringField(3, default=u'bigquery#jobList')
nextPageToken = _messages.StringField(4)
class JobReference(_messages.Message):
r"""A JobReference object.
Fields:
jobId: [Required] The ID of the job. The ID must contain only letters
(a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum
length is 1,024 characters.
projectId: [Required] The ID of the project containing this job.
"""
jobId = _messages.StringField(1)
projectId = _messages.StringField(2)
class JobStatistics(_messages.Message):
r"""A JobStatistics object.
Fields:
creationTime: [Output-only] Creation time of this job, in milliseconds
since the epoch. This field will be present on all jobs.
endTime: [Output-only] End time of this job, in milliseconds since the
epoch. This field will be present whenever a job is in the DONE state.
extract: [Output-only] Statistics for an extract job.
load: [Output-only] Statistics for a load job.
query: [Output-only] Statistics for a query job.
startTime: [Output-only] Start time of this job, in milliseconds since the
epoch. This field will be present when the job transitions from the
PENDING state to either RUNNING or DONE.
totalBytesProcessed: [Output-only] [Deprecated] Use the bytes processed in
the query statistics instead.
"""
creationTime = _messages.IntegerField(1)
endTime = _messages.IntegerField(2)
extract = _messages.MessageField('JobStatistics4', 3)
load = _messages.MessageField('JobStatistics3', 4)
query = _messages.MessageField('JobStatistics2', 5)
startTime = _messages.IntegerField(6)
totalBytesProcessed = _messages.IntegerField(7)
class JobStatistics2(_messages.Message):
r"""A JobStatistics2 object.
Fields:
billingTier: [Output-only] Billing tier for the job.
cacheHit: [Output-only] Whether the query result was fetched from the
query cache.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
queryPlan: [Output-only, Experimental] Describes execution plan for the
query.
referencedTables: [Output-only, Experimental] Referenced tables for the
job. Queries that reference more than 50 tables will not have a complete
list.
schema: [Output-only, Experimental] The schema of the results. Present
only for successful dry run of non-legacy SQL queries.
totalBytesBilled: [Output-only] Total bytes billed for the job.
totalBytesProcessed: [Output-only] Total bytes processed for the job.
"""
billingTier = _messages.IntegerField(1, variant=_messages.Variant.INT32)
cacheHit = _messages.BooleanField(2)
numDmlAffectedRows = _messages.IntegerField(3)
queryPlan = _messages.MessageField('ExplainQueryStage', 4, repeated=True)
referencedTables = _messages.MessageField('TableReference', 5, repeated=True)
schema = _messages.MessageField('TableSchema', 6)
totalBytesBilled = _messages.IntegerField(7)
totalBytesProcessed = _messages.IntegerField(8)
class JobStatistics3(_messages.Message):
r"""A JobStatistics3 object.
Fields:
inputFileBytes: [Output-only] Number of bytes of source data in a load
job.
inputFiles: [Output-only] Number of source files in a load job.
outputBytes: [Output-only] Size of the loaded data in bytes. Note that
while a load job is in the running state, this value may change.
outputRows: [Output-only] Number of rows imported in a load job. Note that
while an import job is in the running state, this value may change.
"""
inputFileBytes = _messages.IntegerField(1)
inputFiles = _messages.IntegerField(2)
outputBytes = _messages.IntegerField(3)
outputRows = _messages.IntegerField(4)
class JobStatistics4(_messages.Message):
r"""A JobStatistics4 object.
Fields:
destinationUriFileCounts: [Output-only] Number of files per destination
URI or URI pattern specified in the extract configuration. These values
will be in the same order as the URIs specified in the 'destinationUris'
field.
"""
destinationUriFileCounts = _messages.IntegerField(1, repeated=True)
class JobStatus(_messages.Message):
r"""A JobStatus object.
Fields:
errorResult: [Output-only] Final error result of the job. If present,
indicates that the job has completed and was unsuccessful.
errors: [Output-only] All errors encountered during the running of the
job. Errors here do not necessarily mean that the job has completed or
was unsuccessful.
state: [Output-only] Running state of the job.
"""
errorResult = _messages.MessageField('ErrorProto', 1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
state = _messages.StringField(3)
@encoding.MapUnrecognizedFields('additionalProperties')
class JsonObject(_messages.Message):
r"""Represents a single JSON object.
Messages:
AdditionalProperty: An additional property for a JsonObject object.
Fields:
additionalProperties: Additional properties of type JsonObject
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a JsonObject object.
Fields:
key: Name of the additional property.
value: A JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
JsonValue = extra_types.JsonValue
class ProjectList(_messages.Message):
r"""A ProjectList object.
Messages:
ProjectsValueListEntry: A ProjectsValueListEntry object.
Fields:
etag: A hash of the page of results
kind: The type of list.
nextPageToken: A token to request the next page of results.
projects: Projects to which you have at least READ access.
totalItems: The total number of projects in the list.
"""
class ProjectsValueListEntry(_messages.Message):
r"""A ProjectsValueListEntry object.
Fields:
friendlyName: A descriptive name for this project.
id: An opaque ID of this project.
kind: The resource type.
numericId: The numeric ID of this project.
projectReference: A unique reference to this project.
"""
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#project')
numericId = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
projectReference = _messages.MessageField('ProjectReference', 5)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#projectList')
nextPageToken = _messages.StringField(3)
projects = _messages.MessageField('ProjectsValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class ProjectReference(_messages.Message):
r"""A ProjectReference object.
Fields:
projectId: [Required] ID of the project. Can be either the numeric ID or
the assigned ID of the project.
"""
projectId = _messages.StringField(1)
class QueryRequest(_messages.Message):
r"""A QueryRequest object.
Fields:
defaultDataset: [Optional] Specifies the default datasetId and projectId
to assume for any unqualified table names in the query. If not set, all
table names in the query string must be qualified in the format
'datasetId.tableId'.
dryRun: [Optional] If set to true, BigQuery doesn't run the job. Instead,
if the query is valid, BigQuery returns statistics about the job such as
how many bytes would be processed. If the query is invalid, an error
returns. The default value is false.
kind: The resource type of the request.
maxResults: [Optional] The maximum number of rows of data to return per
page of results. Setting this flag to a small value such as 1000 and
then paging through results might improve reliability when the query
result set is large. In addition to this limit, responses are also
limited to 10 MB. By default, there is no maximum row count, and only
the byte limit applies.
preserveNulls: [Deprecated] This property is deprecated.
query: [Required] A query string, following the BigQuery query syntax, of
the query to execute. Example: "SELECT count(f1) FROM
[myProjectId:myDatasetId.myTableId]".
timeoutMs: [Optional] How long to wait for the query to complete, in
milliseconds, before the request times out and returns. Note that this
is only a timeout for the request, not the query. If the query takes
longer to run than the timeout value, the call returns without any
results and with the 'jobComplete' flag set to false. You can call
GetQueryResults() to wait for the query to complete and read the
results. The default value is 10000 milliseconds (10 seconds).
useLegacySql: [Experimental] Specifies whether to use BigQuery's legacy
SQL dialect for this query. The default value is true. If set to false,
the query will use BigQuery's standard SQL:
https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is
set to false, the values of allowLargeResults and flattenResults are
ignored; query will be run as if allowLargeResults is true and
flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. The default value is true.
"""
defaultDataset = _messages.MessageField('DatasetReference', 1)
dryRun = _messages.BooleanField(2)
kind = _messages.StringField(3, default=u'bigquery#queryRequest')
maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32)
preserveNulls = _messages.BooleanField(5)
query = _messages.StringField(6)
timeoutMs = _messages.IntegerField(7, variant=_messages.Variant.UINT32)
useLegacySql = _messages.BooleanField(8)
useQueryCache = _messages.BooleanField(9, default=True)
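# Illustrative sketch (not part of the generated module): building a dry-run
# QueryRequest. Field names follow the message definition above; the client
# method that would send the request is omitted here.
#
#   request = QueryRequest(
#       query='SELECT COUNT(f1) FROM [myProjectId:myDatasetId.myTableId]',
#       dryRun=True,
#       timeoutMs=10000,
#       useLegacySql=True)
#
# With dryRun=True, BigQuery validates the query and returns statistics such
# as how many bytes would be processed instead of running the job.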
class QueryResponse(_messages.Message):
r"""A QueryResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the Job that was created to run the query. This
field will be present even if the original request timed out, in which
case GetQueryResults can be used to read the results once the query has
completed. Since this API only returns the first page of results,
subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
If this query was a dry run, this is the number of bytes that would be
processed if the query were run.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
jobComplete = _messages.BooleanField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#queryResponse')
numDmlAffectedRows = _messages.IntegerField(6)
pageToken = _messages.StringField(7)
rows = _messages.MessageField('TableRow', 8, repeated=True)
schema = _messages.MessageField('TableSchema', 9)
totalBytesProcessed = _messages.IntegerField(10)
totalRows = _messages.IntegerField(11, variant=_messages.Variant.UINT64)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class Streamingbuffer(_messages.Message):
r"""A Streamingbuffer object.
Fields:
estimatedBytes: [Output-only] A lower-bound estimate of the number of
bytes currently in the streaming buffer.
estimatedRows: [Output-only] A lower-bound estimate of the number of rows
currently in the streaming buffer.
oldestEntryTime: [Output-only] Contains the timestamp of the oldest entry
in the streaming buffer, in milliseconds since the epoch, if the
streaming buffer is available.
"""
estimatedBytes = _messages.IntegerField(1, variant=_messages.Variant.UINT64)
estimatedRows = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
oldestEntryTime = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class Table(_messages.Message):
r"""A Table object.
Fields:
creationTime: [Output-only] The time when this table was created, in
milliseconds since the epoch.
description: [Optional] A user-friendly description of this table.
etag: [Output-only] A hash of this resource.
expirationTime: [Optional] The time when this table expires, in
milliseconds since the epoch. If not present, the table will persist
indefinitely. Expired tables will be deleted and their storage
reclaimed.
externalDataConfiguration: [Optional] Describes the data format, location,
and other properties of a table stored outside of BigQuery. By defining
these properties, the data source can then be queried as if it were a
standard BigQuery table.
friendlyName: [Optional] A descriptive name for this table.
id: [Output-only] An opaque ID uniquely identifying the table.
kind: [Output-only] The type of the resource.
lastModifiedTime: [Output-only] The time when this table was last
modified, in milliseconds since the epoch.
location: [Output-only] The geographic location where the table resides.
This value is inherited from the dataset.
numBytes: [Output-only] The size of this table in bytes, excluding any
data in the streaming buffer.
numLongTermBytes: [Output-only] The number of bytes in the table that are
considered "long-term storage".
numRows: [Output-only] The number of rows of data in this table, excluding
any data in the streaming buffer.
schema: [Optional] Describes the schema of this table.
selfLink: [Output-only] A URL that can be used to access this resource
again.
streamingBuffer: [Output-only] Contains information regarding this table's
streaming buffer, if one is present. This field will be absent if the
table is not being streamed to or if there is no data in the streaming
buffer.
tableReference: [Required] Reference describing the ID of this table.
timePartitioning: [Experimental] If specified, configures time-based
partitioning for this table.
type: [Output-only] Describes the table type. The following values are
supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined
by a SQL query. EXTERNAL: A table that references data stored in an
external storage system, such as Google Cloud Storage. The default value
is TABLE.
view: [Optional] The view definition.
"""
creationTime = _messages.IntegerField(1)
description = _messages.StringField(2)
etag = _messages.StringField(3)
expirationTime = _messages.IntegerField(4)
externalDataConfiguration = _messages.MessageField('ExternalDataConfiguration', 5)
friendlyName = _messages.StringField(6)
id = _messages.StringField(7)
kind = _messages.StringField(8, default=u'bigquery#table')
lastModifiedTime = _messages.IntegerField(9, variant=_messages.Variant.UINT64)
location = _messages.StringField(10)
numBytes = _messages.IntegerField(11)
numLongTermBytes = _messages.IntegerField(12)
numRows = _messages.IntegerField(13, variant=_messages.Variant.UINT64)
schema = _messages.MessageField('TableSchema', 14)
selfLink = _messages.StringField(15)
streamingBuffer = _messages.MessageField('Streamingbuffer', 16)
tableReference = _messages.MessageField('TableReference', 17)
timePartitioning = _messages.MessageField('TimePartitioning', 18)
type = _messages.StringField(19)
view = _messages.MessageField('ViewDefinition', 20)
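# Illustrative sketch (not part of the generated module): a minimal Table
# resource for a tables.insert call. tableReference is the only required
# field; description and schema are optional.
#
#   table = Table(
#       tableReference=TableReference(
#           projectId='my-project',
#           datasetId='my_dataset',
#           tableId='my_table'),
#       description='Example table',
#       schema=TableSchema(fields=[
#           TableFieldSchema(name='name', type='STRING', mode='REQUIRED')]))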
class TableCell(_messages.Message):
r"""A TableCell object.
Fields:
v: An extra_types.JsonValue attribute.
"""
v = _messages.MessageField('extra_types.JsonValue', 1)
class TableDataInsertAllRequest(_messages.Message):
r"""A TableDataInsertAllRequest object.
Messages:
RowsValueListEntry: A RowsValueListEntry object.
Fields:
ignoreUnknownValues: [Optional] Accept rows that contain values that do
not match the schema. The unknown values are ignored. Default is false,
which treats unknown values as errors.
kind: The resource type of the response.
rows: The rows to insert.
skipInvalidRows: [Optional] Insert all valid rows of a request, even if
invalid rows exist. The default value is false, which causes the entire
request to fail if any invalid rows exist.
templateSuffix: [Experimental] If specified, treats the destination table
as a base template, and inserts the rows into an instance table named
"{destination}{templateSuffix}". BigQuery will manage creation of the
instance table, using the schema of the base template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-
tables for considerations when working with template tables.
"""
class RowsValueListEntry(_messages.Message):
r"""A RowsValueListEntry object.
Fields:
insertId: [Optional] A unique ID for each row. BigQuery uses this
property to detect duplicate insertion requests on a best-effort
basis.
json: [Required] A JSON object that contains a row of data. The object's
properties and values must match the destination table's schema.
"""
insertId = _messages.StringField(1)
json = _messages.MessageField('JsonObject', 2)
ignoreUnknownValues = _messages.BooleanField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllRequest')
rows = _messages.MessageField('RowsValueListEntry', 3, repeated=True)
skipInvalidRows = _messages.BooleanField(4)
templateSuffix = _messages.StringField(5)
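# Illustrative sketch (not part of the generated module): streaming two rows
# with per-row insertIds so BigQuery can deduplicate retried inserts on a
# best-effort basis. row1_json and row2_json stand in for JsonObject
# instances built from the row data.
#
#   request = TableDataInsertAllRequest(
#       skipInvalidRows=True,
#       rows=[
#           TableDataInsertAllRequest.RowsValueListEntry(
#               insertId='row-1', json=row1_json),
#           TableDataInsertAllRequest.RowsValueListEntry(
#               insertId='row-2', json=row2_json)])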
class TableDataInsertAllResponse(_messages.Message):
r"""A TableDataInsertAllResponse object.
Messages:
InsertErrorsValueListEntry: An InsertErrorsValueListEntry object.
Fields:
insertErrors: An array of errors for rows that were not inserted.
kind: The resource type of the response.
"""
class InsertErrorsValueListEntry(_messages.Message):
r"""A InsertErrorsValueListEntry object.
Fields:
errors: Error information for the row indicated by the index property.
index: The index of the row that the error applies to.
"""
errors = _messages.MessageField('ErrorProto', 1, repeated=True)
index = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
insertErrors = _messages.MessageField('InsertErrorsValueListEntry', 1, repeated=True)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllResponse')
class TableDataList(_messages.Message):
r"""A TableDataList object.
Fields:
etag: A hash of this page of results.
kind: The resource type of the response.
pageToken: A token used for paging results. Providing this token instead
of the startIndex parameter can help you retrieve stable results when an
underlying table is changing.
rows: Rows of results.
totalRows: The total number of rows in the complete table.
"""
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataList')
pageToken = _messages.StringField(3)
rows = _messages.MessageField('TableRow', 4, repeated=True)
totalRows = _messages.IntegerField(5)
class TableFieldSchema(_messages.Message):
r"""A TableFieldSchema object.
Fields:
description: [Optional] The field description. The maximum length is 16K
characters.
fields: [Optional] Describes the nested schema fields if the type property
is set to RECORD.
mode: [Optional] The field mode. Possible values include NULLABLE,
REQUIRED and REPEATED. The default value is NULLABLE.
name: [Required] The field name. The name must contain only letters (a-z,
A-Z), numbers (0-9), or underscores (_), and must start with a letter or
underscore. The maximum length is 128 characters.
type: [Required] The field data type. Possible values include STRING,
BYTES, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD
indicates that the field contains a nested schema).
"""
description = _messages.StringField(1)
fields = _messages.MessageField('TableFieldSchema', 2, repeated=True)
mode = _messages.StringField(3)
name = _messages.StringField(4)
type = _messages.StringField(5)
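# Illustrative sketch (not part of the generated module): a nested field.
# Setting type to RECORD makes the fields property describe the sub-schema.
#
#   address = TableFieldSchema(
#       name='address',
#       type='RECORD',
#       mode='NULLABLE',
#       fields=[
#           TableFieldSchema(name='street', type='STRING'),
#           TableFieldSchema(name='zip', type='STRING')])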
class TableList(_messages.Message):
r"""A TableList object.
Messages:
TablesValueListEntry: A TablesValueListEntry object.
Fields:
etag: A hash of this page of results.
kind: The type of list.
nextPageToken: A token to request the next page of results.
tables: Tables in the requested dataset.
totalItems: The total number of tables in the dataset.
"""
class TablesValueListEntry(_messages.Message):
r"""A TablesValueListEntry object.
Fields:
friendlyName: The user-friendly name for this table.
id: An opaque ID of the table.
kind: The resource type.
tableReference: A reference uniquely identifying the table.
type: The type of table. Possible values are: TABLE, VIEW.
"""
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#table')
tableReference = _messages.MessageField('TableReference', 4)
type = _messages.StringField(5)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableList')
nextPageToken = _messages.StringField(3)
tables = _messages.MessageField('TablesValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class TableReference(_messages.Message):
r"""A TableReference object.
Fields:
datasetId: [Required] The ID of the dataset containing this table.
projectId: [Required] The ID of the project containing this table.
tableId: [Required] The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is
1,024 characters.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
tableId = _messages.StringField(3)
class TableRow(_messages.Message):
r"""A TableRow object.
Fields:
f: Represents a single row in the result set, consisting of one or more
fields.
"""
f = _messages.MessageField('TableCell', 1, repeated=True)
class TableSchema(_messages.Message):
r"""A TableSchema object.
Fields:
fields: Describes the fields in a table.
"""
fields = _messages.MessageField('TableFieldSchema', 1, repeated=True)
class TimePartitioning(_messages.Message):
r"""A TimePartitioning object.
Fields:
expirationMs: [Optional] Number of milliseconds for which to keep the
storage for a partition.
type: [Required] The only type supported is DAY, which will generate one
partition per day based on data loading time.
"""
expirationMs = _messages.IntegerField(1)
type = _messages.StringField(2)
class UserDefinedFunctionResource(_messages.Message):
r"""A UserDefinedFunctionResource object.
Fields:
inlineCode: [Pick one] An inline resource that contains code for a user-
defined function (UDF). Providing an inline code resource is equivalent
to providing a URI for a file containing the same code.
resourceUri: [Pick one] A code resource to load from a Google Cloud
Storage URI (gs://bucket/path).
"""
inlineCode = _messages.StringField(1)
resourceUri = _messages.StringField(2)
class ViewDefinition(_messages.Message):
r"""A ViewDefinition object.
Fields:
query: [Required] A query that BigQuery executes when the view is
referenced.
useLegacySql: [Experimental] Specifies whether to use BigQuery's legacy
SQL for this view. The default value is true. If set to false, the view
will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ Queries and views that reference this view must use the same
flag value.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
"""
query = _messages.StringField(1)
useLegacySql = _messages.BooleanField(2)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 3, repeated=True)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/samples/bigquery_sample/bigquery_v2/bigquery_v2_messages.py
| 0.846895 | 0.308893 |
bigquery_v2_messages.py
|
pypi
|
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'iam'
class AuditConfig(_messages.Message):
r"""Enables "data access" audit logging for a service and specifies a list
of members that are log-exempted.
Fields:
exemptedMembers: Specifies the identities that are exempted from "data
access" audit logging for the `service` specified above. Follows the
same format of Binding.members.
service: Specifies a service that will be enabled for "data access" audit
logging. For example, `resourcemanager`, `storage`, `compute`.
`allServices` is a special value that covers all services.
"""
exemptedMembers = _messages.StringField(1, repeated=True)
service = _messages.StringField(2)
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `[email protected]`
or `[email protected]`. * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
[email protected]`. * `group:{emailid}`: An email address
that represents a Google group. For example, `[email protected]`. *
`domain:{domain}`: A Google Apps domain name that represents all the
users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`. Required
"""
members = _messages.StringField(1, repeated=True)
role = _messages.StringField(2)
class CloudAuditOptions(_messages.Message):
r"""Write a Cloud Audit log"""
class Condition(_messages.Message):
r"""A condition to be met.
Enums:
IamValueValuesEnum: Trusted attributes supplied by the IAM system.
OpValueValuesEnum: An operator to apply the subject with.
SysValueValuesEnum: Trusted attributes supplied by any service that owns
resources and uses the IAM system for access control.
Fields:
iam: Trusted attributes supplied by the IAM system.
op: An operator to apply the subject with.
svc: Trusted attributes discharged by the service.
sys: Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
value: DEPRECATED. Use 'values' instead.
values: The objects of the condition. This is mutually exclusive with
'value'.
"""
class IamValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by the IAM system.
Values:
NO_ATTR: Default non-attribute.
AUTHORITY: Either principal or (if present) authority
ATTRIBUTION: selector Always the original principal, but making clear
"""
NO_ATTR = 0
AUTHORITY = 1
ATTRIBUTION = 2
class OpValueValuesEnum(_messages.Enum):
r"""An operator to apply the subject with.
Values:
NO_OP: Default no-op.
EQUALS: DEPRECATED. Use IN instead.
NOT_EQUALS: DEPRECATED. Use NOT_IN instead.
IN: Set-inclusion check.
NOT_IN: Set-exclusion check.
DISCHARGED: Subject is discharged
"""
NO_OP = 0
EQUALS = 1
NOT_EQUALS = 2
IN = 3
NOT_IN = 4
DISCHARGED = 5
class SysValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
Values:
NO_ATTR: Default non-attribute type
REGION: Region of the resource
SERVICE: Service name
NAME: Resource name
IP: IP address of the caller
"""
NO_ATTR = 0
REGION = 1
SERVICE = 2
NAME = 3
IP = 4
iam = _messages.EnumField('IamValueValuesEnum', 1)
op = _messages.EnumField('OpValueValuesEnum', 2)
svc = _messages.StringField(3)
sys = _messages.EnumField('SysValueValuesEnum', 4)
value = _messages.StringField(5)
values = _messages.StringField(6, repeated=True)
class CounterOptions(_messages.Message):
r"""Options for counters
Fields:
field: The field value to attribute.
metric: The metric to update.
"""
field = _messages.StringField(1)
metric = _messages.StringField(2)
class CreateServiceAccountKeyRequest(_messages.Message):
r"""The service account key create request.
Enums:
PrivateKeyTypeValueValuesEnum: The output format of the private key.
`GOOGLE_CREDENTIALS_FILE` is the default output format.
Fields:
privateKeyType: The output format of the private key.
`GOOGLE_CREDENTIALS_FILE` is the default output format.
"""
class PrivateKeyTypeValueValuesEnum(_messages.Enum):
r"""The output format of the private key. `GOOGLE_CREDENTIALS_FILE` is the
default output format.
Values:
TYPE_UNSPECIFIED: Unspecified. Equivalent to
`TYPE_GOOGLE_CREDENTIALS_FILE`.
TYPE_PKCS12_FILE: PKCS12 format. The password for the PKCS12 file is
`notasecret`. For more information, see
https://tools.ietf.org/html/rfc7292.
TYPE_GOOGLE_CREDENTIALS_FILE: Google Credentials File format.
"""
TYPE_UNSPECIFIED = 0
TYPE_PKCS12_FILE = 1
TYPE_GOOGLE_CREDENTIALS_FILE = 2
privateKeyType = _messages.EnumField('PrivateKeyTypeValueValuesEnum', 1)
class CreateServiceAccountRequest(_messages.Message):
r"""The service account create request.
Fields:
accountId: Required. The account id that is used to generate the service
account email address and a stable unique id. It is unique within a
project, must be 1-63 characters long, and match the regular expression
`[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.
serviceAccount: The ServiceAccount resource to create. Currently, only the
following values are user assignable: `display_name` .
"""
accountId = _messages.StringField(1)
serviceAccount = _messages.MessageField('ServiceAccount', 2)
class DataAccessOptions(_messages.Message):
r"""Write a Data Access (Gin) log"""
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class GetPolicyDetailsRequest(_messages.Message):
r"""The request to get the current policy and the policies on the inherited
resources the user has access to.
Fields:
fullResourcePath: REQUIRED: The full resource path of the current policy
being requested, e.g., `//dataflow.googleapis.com/projects/../jobs/..`.
pageSize: Limit on the number of policies to include in the response.
Further policies can subsequently be obtained by including the
GetPolicyDetailsResponse.next_page_token in a subsequent request. If
zero, the default page size 20 will be used. Must be given a value in
range [0, 100], otherwise an invalid argument error will be returned.
pageToken: Optional pagination token returned in an earlier
GetPolicyDetailsResponse.next_page_token response.
"""
fullResourcePath = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
class GetPolicyDetailsResponse(_messages.Message):
r"""The response to the `GetPolicyDetailsRequest` containing the current
policy and the policies on the inherited resources the user has access to.
Fields:
nextPageToken: To retrieve the next page of results, set
GetPolicyDetailsRequest.page_token to this value. If this value is
empty, then there are not any further policies that the user has access
to. The lifetime is 60 minutes. An "Expired pagination token" error will
be returned if exceeded.
policies: The current policy and all the inherited policies the user has
access to.
"""
nextPageToken = _messages.StringField(1)
policies = _messages.MessageField('PolicyDetail', 2, repeated=True)
class IamProjectsServiceAccountsCreateRequest(_messages.Message):
r"""A IamProjectsServiceAccountsCreateRequest object.
Fields:
createServiceAccountRequest: A CreateServiceAccountRequest resource to be
passed as the request body.
name: Required. The resource name of the project associated with the
service accounts, such as `projects/my-project-123`.
"""
createServiceAccountRequest = _messages.MessageField('CreateServiceAccountRequest', 1)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsDeleteRequest(_messages.Message):
r"""A IamProjectsServiceAccountsDeleteRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard
for the project will infer the project from the account. The `account`
value can be the `email` address or the `unique_id` of the service
account.
"""
name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsGetIamPolicyRequest(_messages.Message):
r"""A IamProjectsServiceAccountsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
`resource` is usually specified as a path, such as
`projects/*project*/zones/*zone*/disks/*disk*`. The format for the path
specified in this value is resource specific and is specified in the
`getIamPolicy` documentation.
"""
resource = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsGetRequest(_messages.Message):
r"""A IamProjectsServiceAccountsGetRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard
for the project will infer the project from the account. The `account`
value can be the `email` address or the `unique_id` of the service
account.
"""
name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsKeysCreateRequest(_messages.Message):
r"""A IamProjectsServiceAccountsKeysCreateRequest object.
Fields:
createServiceAccountKeyRequest: A CreateServiceAccountKeyRequest resource
to be passed as the request body.
name: The resource name of the service account in the following format:
`projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard
for the project will infer the project from the account. The `account`
value can be the `email` address or the `unique_id` of the service
account.
"""
createServiceAccountKeyRequest = _messages.MessageField('CreateServiceAccountKeyRequest', 1)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsKeysDeleteRequest(_messages.Message):
r"""A IamProjectsServiceAccountsKeysDeleteRequest object.
Fields:
name: The resource name of the service account key in the following
format: `projects/{project}/serviceAccounts/{account}/keys/{key}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
"""
name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsKeysGetRequest(_messages.Message):
r"""A IamProjectsServiceAccountsKeysGetRequest object.
Enums:
PublicKeyTypeValueValuesEnum: The output format of the public key
requested. X509_PEM is the default output format.
Fields:
name: The resource name of the service account key in the following
format: `projects/{project}/serviceAccounts/{account}/keys/{key}`.
Using `-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
publicKeyType: The output format of the public key requested. X509_PEM is
the default output format.
"""
class PublicKeyTypeValueValuesEnum(_messages.Enum):
r"""The output format of the public key requested. X509_PEM is the default
output format.
Values:
TYPE_NONE: <no description>
TYPE_X509_PEM_FILE: <no description>
TYPE_RAW_PUBLIC_KEY: <no description>
"""
TYPE_NONE = 0
TYPE_X509_PEM_FILE = 1
TYPE_RAW_PUBLIC_KEY = 2
name = _messages.StringField(1, required=True)
publicKeyType = _messages.EnumField('PublicKeyTypeValueValuesEnum', 2)
class IamProjectsServiceAccountsKeysListRequest(_messages.Message):
r"""A IamProjectsServiceAccountsKeysListRequest object.
Enums:
KeyTypesValueValuesEnum: Filters the types of keys the user wants to
include in the list response. Duplicate key types are not allowed. If no
key type is provided, all keys are returned.
Fields:
keyTypes: Filters the types of keys the user wants to include in the list
response. Duplicate key types are not allowed. If no key type is
provided, all keys are returned.
name: The resource name of the service account in the following format:
`projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard
for the project will infer the project from the account. The `account`
value can be the `email` address or the `unique_id` of the service
account.
"""
class KeyTypesValueValuesEnum(_messages.Enum):
r"""Filters the types of keys the user wants to include in the list
response. Duplicate key types are not allowed. If no key type is provided,
all keys are returned.
Values:
KEY_TYPE_UNSPECIFIED: <no description>
USER_MANAGED: <no description>
SYSTEM_MANAGED: <no description>
"""
KEY_TYPE_UNSPECIFIED = 0
USER_MANAGED = 1
SYSTEM_MANAGED = 2
keyTypes = _messages.EnumField('KeyTypesValueValuesEnum', 1, repeated=True)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsListRequest(_messages.Message):
r"""A IamProjectsServiceAccountsListRequest object.
Fields:
name: Required. The resource name of the project associated with the
service accounts, such as `projects/my-project-123`.
pageSize: Optional limit on the number of service accounts to include in
the response. Further accounts can subsequently be obtained by including
the ListServiceAccountsResponse.next_page_token in a subsequent request.
pageToken: Optional pagination token returned in an earlier
ListServiceAccountsResponse.next_page_token.
removeDeletedServiceAccounts: Do not list service accounts deleted from
Gaia. <b><font color="red">DO NOT INCLUDE IN EXTERNAL
DOCUMENTATION</font></b>.
"""
name = _messages.StringField(1, required=True)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
removeDeletedServiceAccounts = _messages.BooleanField(4)
class IamProjectsServiceAccountsSetIamPolicyRequest(_messages.Message):
r"""A IamProjectsServiceAccountsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
`resource` is usually specified as a path, such as
`projects/*project*/zones/*zone*/disks/*disk*`. The format for the path
specified in this value is resource specific and is specified in the
`setIamPolicy` documentation.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class IamProjectsServiceAccountsSignBlobRequest(_messages.Message):
r"""A IamProjectsServiceAccountsSignBlobRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard
for the project will infer the project from the account. The `account`
value can be the `email` address or the `unique_id` of the service
account.
signBlobRequest: A SignBlobRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signBlobRequest = _messages.MessageField('SignBlobRequest', 2)
class IamProjectsServiceAccountsSignJwtRequest(_messages.Message):
r"""A IamProjectsServiceAccountsSignJwtRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard
for the project will infer the project from the account. The `account`
value can be the `email` address or the `unique_id` of the service
account.
signJwtRequest: A SignJwtRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signJwtRequest = _messages.MessageField('SignJwtRequest', 2)
class IamProjectsServiceAccountsTestIamPermissionsRequest(_messages.Message):
r"""A IamProjectsServiceAccountsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. `resource` is usually specified as a path, such as
`projects/*project*/zones/*zone*/disks/*disk*`. The format for the path
specified in this value is resource specific and is specified in the
`testIamPermissions` documentation.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class ListServiceAccountKeysResponse(_messages.Message):
r"""The service account keys list response.
Fields:
keys: The public keys for the service account.
"""
keys = _messages.MessageField('ServiceAccountKey', 1, repeated=True)
class ListServiceAccountsResponse(_messages.Message):
r"""The service account list response.
Fields:
accounts: The list of matching service accounts.
nextPageToken: To retrieve the next page of results, set
ListServiceAccountsRequest.page_token to this value.
"""
accounts = _messages.MessageField('ServiceAccount', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class LogConfig(_messages.Message):
r"""Specifies what kind of log the caller must write Increment a streamz
counter with the specified metric and field names. Metric names should
start with a '/', generally be lowercase-only, and end in "_count". Field
names should not contain an initial slash. The actual exported metric names
will have "/iam/policy" prepended. Field names correspond to IAM request
parameters and field values are their respective values. At present the
only supported field names are - "iam_principal", corresponding to
IAMContext.principal; - "" (empty string), resulting in one aggretated
counter with no field. Examples: counter { metric: "/debug_access_count"
field: "iam_principal" } ==> increment counter
/iam/policy/backend_debug_access_count
{iam_principal=[value of IAMContext.principal]} At this time we do not
support: * multiple field names (though this may be supported in the future)
* decrementing the counter * incrementing it by anything other than 1
Fields:
cloudAudit: Cloud audit options.
counter: Counter options.
dataAccess: Data access options.
"""
cloudAudit = _messages.MessageField('CloudAuditOptions', 1)
counter = _messages.MessageField('CounterOptions', 2)
dataAccess = _messages.MessageField('DataAccessOptions', 3)
class Policy(_messages.Message):
r"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `Binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **Example** { "bindings": [ {
"role": "roles/owner", "members": [
"user:[email protected]", "group:[email protected]",
"domain:google.com", "serviceAccount:my-other-
[email protected]", ] }, {
"role": "roles/viewer", "members": ["user:[email protected]"]
} ] } For a description of IAM and its features, see the [IAM
developer's guide](https://cloud.google.com/iam).
Fields:
auditConfigs: Specifies audit logging configs for "data access". "data
access": generally refers to data reads/writes and admin reads. "admin
activity": generally refers to admin writes. Note: `AuditConfig`
doesn't apply to "admin activity", which always enables audit logging.
bindings: Associates a list of `members` to a `role`. Multiple `bindings`
must not be specified for the same `role`. `bindings` with no members
will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten blindly.
iamOwned: A boolean attribute.
rules: If more than one rule is specified, the rules are applied in the
following manner: - All matching LOG rules are always applied. - If any
DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be
applied if one or more matching rule requires logging. - Otherwise, if
any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted.
Logging will be applied if one or more matching rule requires logging. -
Otherwise, if no rule applies, permission is denied.
version: Version of the `Policy`. The default version is 0.
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
iamOwned = _messages.BooleanField(4)
rules = _messages.MessageField('Rule', 5, repeated=True)
version = _messages.IntegerField(6, variant=_messages.Variant.INT32)
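# Illustrative sketch (not part of the generated module): the read-modify-
# write cycle described for etag above. 'current' stands in for the Policy
# returned by getIamPolicy; sending its etag back with setIamPolicy lets the
# service reject a write that races with another update.
#
#   current.bindings.append(
#       Binding(role='roles/viewer', members=['user:[email protected]']))
#   request = SetIamPolicyRequest(policy=current)  # etag travels with policy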
class PolicyDetail(_messages.Message):
r"""A policy and its full resource path.
Fields:
fullResourcePath: The full resource path of the policy e.g.,
`//dataflow.googleapis.com/projects/../jobs/..`. Note that a resource
and its inherited resource have different `full_resource_path`.
policy: The policy of a `resource/project/folder`.
"""
fullResourcePath = _messages.StringField(1)
policy = _messages.MessageField('Policy', 2)
class QueryGrantableRolesRequest(_messages.Message):
r"""The grantable role query request.
Fields:
fullResourceName: Required. The full resource name to query from the list
of grantable roles. The name follows the Google Cloud Platform resource
format. For example, a Cloud Platform project with id `my-project` will
be named `//cloudresourcemanager.googleapis.com/projects/my-project`.
"""
fullResourceName = _messages.StringField(1)
class QueryGrantableRolesResponse(_messages.Message):
r"""The grantable role query response.
Fields:
roles: The list of matching roles.
"""
roles = _messages.MessageField('Role', 1, repeated=True)
class Role(_messages.Message):
r"""A role in the Identity and Access Management API.
Fields:
apiTokens: A string attribute.
description: Optional. A human-readable description for the role.
name: The name of the role. Examples of roles names are: `roles/editor`,
`roles/viewer` and `roles/logging.viewer`.
title: Optional. A human-readable title for the role. Typically this is
limited to 100 UTF-8 bytes.
"""
apiTokens = _messages.StringField(1, repeated=True)
description = _messages.StringField(2)
name = _messages.StringField(3)
title = _messages.StringField(4)
class Rule(_messages.Message):
r"""A rule to be applied in a Policy.
Enums:
ActionValueValuesEnum: Required
Fields:
action: Required
conditions: Additional restrictions that must be met
description: Human-readable description of the rule.
in_: If one or more 'in' clauses are specified, the rule matches if the
PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
logConfig: The config returned to callers of tech.iam.IAM.CheckPolicy for
any entries that match the LOG action.
notIn: If one or more 'not_in' clauses are specified, the rule matches if
the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries. The format
for in and not_in entries is the same as for members in a Binding (see
google/iam/v1/policy.proto).
permissions: A permission is a string of form '<service>.<resource
type>.<verb>' (e.g., 'storage.buckets.list'). A value of '*' matches all
permissions, and a verb part of '*' (e.g., 'storage.buckets.*') matches
all verbs.
"""
class ActionValueValuesEnum(_messages.Enum):
r"""Required
Values:
NO_ACTION: Default no action.
ALLOW: Matching 'Entries' grant access.
ALLOW_WITH_LOG: Matching 'Entries' grant access and the caller promises
to log the request per the returned log_configs.
DENY: Matching 'Entries' deny access.
DENY_WITH_LOG: Matching 'Entries' deny access and the caller promises to
log the request per the returned log_configs.
LOG: Matching 'Entries' tell IAM.Check callers to generate logs.
"""
NO_ACTION = 0
ALLOW = 1
ALLOW_WITH_LOG = 2
DENY = 3
DENY_WITH_LOG = 4
LOG = 5
action = _messages.EnumField('ActionValueValuesEnum', 1)
conditions = _messages.MessageField('Condition', 2, repeated=True)
description = _messages.StringField(3)
in_ = _messages.StringField(4, repeated=True)
logConfig = _messages.MessageField('LogConfig', 5, repeated=True)
notIn = _messages.StringField(6, repeated=True)
permissions = _messages.StringField(7, repeated=True)
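# Illustrative sketch (not part of the generated module): a Rule that asks
# callers to log bucket list calls for any principal.
#
#   rule = Rule(
#       action=Rule.ActionValueValuesEnum.LOG,
#       in_=['allUsers'],
#       permissions=['storage.buckets.list'],
#       description='Log bucket list calls')
#
# Note the trailing underscore: 'in' is a reserved word in Python, so the
# field is named in_ and remapped to the JSON key "in" at the bottom of this
# module.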
class ServiceAccount(_messages.Message):
r"""A service account in the Identity and Access Management API. To create
a service account, specify the `project_id` and the `account_id` for the
account. The `account_id` is unique within the project, and is used to
generate the service account email address and a stable `unique_id`. All
other methods can identify the service account using the format
`projects/{project}/serviceAccounts/{account}`. Using `-` as a wildcard for
the project will infer the project from the account. The `account` value can
be the `email` address or the `unique_id` of the service account.
Fields:
description: Optional. A user-specified opaque description of the service
account.
displayName: Optional. A user-specified description of the service
account. Must be fewer than 100 UTF-8 bytes.
email: @OutputOnly The email address of the service account.
etag: Used to perform a consistent read-modify-write.
name: The resource name of the service account in the following format:
`projects/{project}/serviceAccounts/{account}`. Requests using `-` as a
wildcard for the project will infer the project from the `account` and
the `account` value can be the `email` address or the `unique_id` of the
service account. In responses the resource name will always be in the
format `projects/{project}/serviceAccounts/{email}`.
oauth2ClientId: @OutputOnly. The OAuth2 client id for the service account.
This is used in conjunction with the OAuth2 clientconfig API to make
three legged OAuth2 (3LO) flows to access the data of Google users.
projectId: @OutputOnly The id of the project that owns the service
account.
uniqueId: @OutputOnly The unique and stable id of the service account.
"""
description = _messages.StringField(1)
displayName = _messages.StringField(2)
email = _messages.StringField(3)
etag = _messages.BytesField(4)
name = _messages.StringField(5)
oauth2ClientId = _messages.StringField(6)
projectId = _messages.StringField(7)
uniqueId = _messages.StringField(8)
class ServiceAccountKey(_messages.Message):
r"""Represents a service account key. A service account has two sets of
key-pairs: user-managed, and system-managed. User-managed key-pairs can be
created and deleted by users. Users are responsible for rotating these keys
periodically to ensure security of their service accounts. Users retain the
private key of these key-pairs, and Google retains ONLY the public key.
System-managed key-pairs are managed automatically by Google, and rotated
daily without user intervention. The private key never leaves Google's
servers to maximize security. Public keys for all service accounts are also
published at the OAuth2 Service Account API.
Enums:
PrivateKeyTypeValueValuesEnum: The output format for the private key. Only
provided in `CreateServiceAccountKey` responses, not in
`GetServiceAccountKey` or `ListServiceAccountKey` responses. Google
never exposes system-managed private keys, and never retains user-
managed private keys.
Fields:
name: The resource name of the service account key in the following format
`projects/{project}/serviceAccounts/{account}/keys/{key}`.
privateKeyData: The private key data. Only provided in
`CreateServiceAccountKey` responses.
privateKeyType: The output format for the private key. Only provided in
`CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or
`ListServiceAccountKey` responses. Google never exposes system-managed
private keys, and never retains user-managed private keys.
publicKeyData: The public key data. Only provided in
`GetServiceAccountKey` responses.
validAfterTime: The key can be used after this timestamp.
validBeforeTime: The key can be used before this timestamp.
"""
class PrivateKeyTypeValueValuesEnum(_messages.Enum):
r"""The output format for the private key. Only provided in
`CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or
`ListServiceAccountKey` responses. Google never exposes system-managed
private keys, and never retains user-managed private keys.
Values:
TYPE_UNSPECIFIED: Unspecified. Equivalent to
`TYPE_GOOGLE_CREDENTIALS_FILE`.
TYPE_PKCS12_FILE: PKCS12 format. The password for the PKCS12 file is
`notasecret`. For more information, see
https://tools.ietf.org/html/rfc7292.
TYPE_GOOGLE_CREDENTIALS_FILE: Google Credentials File format.
"""
TYPE_UNSPECIFIED = 0
TYPE_PKCS12_FILE = 1
TYPE_GOOGLE_CREDENTIALS_FILE = 2
name = _messages.StringField(1)
privateKeyData = _messages.BytesField(2)
privateKeyType = _messages.EnumField('PrivateKeyTypeValueValuesEnum', 3)
publicKeyData = _messages.BytesField(4)
validAfterTime = _messages.StringField(5)
validBeforeTime = _messages.StringField(6)
class SetIamPolicyRequest(_messages.Message):
r"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
"""
policy = _messages.MessageField('Policy', 1)
class SignBlobRequest(_messages.Message):
r"""The service account sign blob request.
Fields:
bytesToSign: The bytes to sign.
"""
bytesToSign = _messages.BytesField(1)
class SignBlobResponse(_messages.Message):
r"""The service account sign blob response.
Fields:
keyId: The id of the key used to sign the blob.
signature: The signed blob.
"""
keyId = _messages.StringField(1)
signature = _messages.BytesField(2)
class SignJwtRequest(_messages.Message):
r"""The service account sign JWT request.
Fields:
payload: The JWT payload to sign, a JSON JWT Claim set.
"""
payload = _messages.StringField(1)
class SignJwtResponse(_messages.Message):
r"""The service account sign JWT response.
Fields:
keyId: The id of the key used to sign the JWT.
signedJwt: The signed JWT.
"""
keyId = _messages.StringField(1)
signedJwt = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class TestIamPermissionsRequest(_messages.Message):
r"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see IAM Overview.
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
r"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
encoding.AddCustomJsonFieldMapping(
Rule, 'in_', 'in')
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
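# These mappings adjust JSON (de)serialization for wire names that are not
# valid Python identifiers. For example (illustrative), a Rule built with
# in_=['allUsers'] serializes via apitools' encoding.MessageToJson to a JSON
# object keyed "in", and the query parameter '$.xgafv' round-trips to the
# f__xgafv field of StandardQueryParameters.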
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/apitools/samples/iam_sample/iam_v1/iam_v1_messages.py
| 0.844008 | 0.348119 |
iam_v1_messages.py
|
pypi
|
ASN.1 library for Python
------------------------
[PyPI](https://pypi.python.org/pypi/pyasn1)
[Python Versions](https://pypi.python.org/pypi/pyasn1/)
[Build Status](https://secure.travis-ci.org/etingof/pyasn1)
[Coverage](https://codecov.io/github/etingof/pyasn1)
[License](https://raw.githubusercontent.com/etingof/pyasn1/master/LICENSE.txt)
This is a free and open source implementation of ASN.1 types and codecs
as a Python package. It was first written to support a particular
protocol (SNMP) but was then generalized to suit a wide range
of protocols based on the
[ASN.1 specification](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items).
Features
--------
* Generic implementation of ASN.1 types (X.208)
* Standards compliant BER/CER/DER codecs
* Dumps/loads ASN.1 structures from Python types
* 100% Python, works with Python 2.4 up to Python 3.6
* MT-safe
* Contributed ASN.1 compiler [Asn1ate](https://github.com/kimgr/asn1ate)
Why use pyasn1
--------------
ASN.1 solves the data serialisation problem. This solution was
designed long ago by the wise Ancients. Back then, they did not
have the luxury of wasting bits. That is why ASN.1 is designed
to serialise data structures of unbounded complexity into
something compact and efficient when it comes to processing
the data.
That probably explains why many network protocols and file formats
still rely on this 30+ year-old technology, including a number of
high-profile Internet protocols and file formats.
Quite a number of books cover the topic of ASN.1.
[Communication between heterogeneous systems](http://www.oss.com/asn1/dubuisson.html)
by Olivier Dubuisson is one of those high quality books freely
available on the Internet.
The pyasn1 package is designed to help Python programmers tackle
network protocols and file formats from the comfort of their Python
prompt. The tool strives to capture all aspects of the rather
complicated ASN.1 system and to represent it in Python terms.
How to use pyasn1
-----------------
With pyasn1 you can build Python objects from ASN.1 data structures.
For example, the following ASN.1 data structure:
```asn1
Record ::= SEQUENCE {
id INTEGER,
room [0] INTEGER OPTIONAL,
house [1] INTEGER DEFAULT 0
}
```
Could be expressed in pyasn1 like this:
```python
from pyasn1.type.namedtype import (
    NamedType, NamedTypes, OptionalNamedType, DefaultedNamedType)
from pyasn1.type.tag import Tag, tagClassContext, tagFormatSimple
from pyasn1.type.univ import Integer, Sequence

class Record(Sequence):
    componentType = NamedTypes(
        NamedType('id', Integer()),
        OptionalNamedType(
            'room', Integer().subtype(
                implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
            )
        ),
        DefaultedNamedType(
            'house', Integer(0).subtype(
                implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
            )
        )
    )
```
It is in the spirit of ASN.1 to take an abstract data description
and turn it into a programming-language-specific form.
Once you have your ASN.1 data structure expressed in Python, you
can use it along the lines of a similar Python type (e.g. ASN.1
`SET` is similar to Python `dict`, `SET OF` to `list`):
```python
>>> record = Record()
>>> record['id'] = 123
>>> record['room'] = 321
>>> str(record)
Record:
id=123
room=321
>>>
```
Part of the power of ASN.1 comes from its serialisation features. You
can serialise your data structure and send it over the network.
```python
>>> from pyasn1.codec.der.encoder import encode
>>> substrate = encode(record)
>>> hexdump(substrate)
00000: 30 07 02 01 7B 80 02 01 41
```
Conversely, you can turn serialised ASN.1 content, as received from
the network or read from a file, into a Python object which you can
introspect, modify, encode and send back.
```python
>>> from pyasn1.codec.der.decoder import decode
>>> received_record, rest_of_substrate = decode(substrate, asn1Spec=Record())
>>>
>>> for field in received_record:
>>> print('{} is {}'.format(field, received_record[field]))
id is 123
room is 321
house is 0
>>>
>>> record == received_record
True
>>> received_record.update(room=123)
>>> substrate = encode(received_record)
>>> hexdump(substrate)
00000: 30 06 02 01 7B 80 01 7B
```
The pyasn1 classes strive to emulate their Python prototypes (e.g. int,
list, dict etc.). But ASN.1 types exhibit more complicated behaviour.
To make life easier for a Pythonista, they can turn their pyasn1
classes into Python built-ins:
```python
>>> from pyasn1.codec.native.encoder import encode
>>> encode(record)
{'id': 123, 'room': 321, 'house': 0}
```
Or vice-versa -- you can initialize an ASN.1 structure from a tree of
Python objects:
```python
>>> from pyasn1.codec.native.decoder import decode
>>> record = decode({'id': 123, 'room': 321, 'house': 0}, asn1Spec=Record())
>>> str(record)
Record:
id=123
room=321
>>>
```
With ASN.1 design, serialisation codecs are decoupled from data objects,
so you could turn every single ASN.1 object into many different
serialised forms. As of this moment, pyasn1 supports BER, DER, CER and
Python built-ins codecs. The extremely compact PER encoding is expected
to be introduced in the upcoming pyasn1 release.
More information on pyasn1 APIs can be found in the
[documentation](http://snmplabs.com/pyasn1/),
compiled ASN.1 modules for different protocols and file formats
could be found in the pyasn1-modules
[repo](https://github.com/etingof/pyasn1-modules).
How to get pyasn1
-----------------
The pyasn1 package is distributed under terms and conditions of 2-clause
BSD [license](http://snmplabs.com/pyasn1/license.html). Source code is freely
available as a GitHub [repo](https://github.com/etingof/pyasn1).
You could `pip install pyasn1` or download it from [PyPI](https://pypi.python.org/pypi/pyasn1).
If something does not work as expected,
[open an issue](https://github.com/etingof/pyasn1/issues) at GitHub or
post your question [on Stack Overflow](http://stackoverflow.com/questions/ask)
or try browsing pyasn1
[mailing list archives](https://sourceforge.net/p/pyasn1/mailman/pyasn1-users/).
Copyright (c) 2005-2017, [Ilya Etingof](mailto:[email protected]).
All rights reserved.
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/README.md
| 0.541894 | 0.914596 |
README.md
|
pypi
|
import sys
from pyasn1 import error
from pyasn1.type import tag
from pyasn1.type import tagmap
__all__ = ['NamedType', 'OptionalNamedType', 'DefaultedNamedType',
'NamedTypes']
try:
any
except NameError:
# Python 2.4 compatibility: emulate the built-in any() (added in Python 2.5)
any = lambda x: bool(filter(bool, x))
class NamedType(object):
"""Create named field object for a constructed ASN.1 type.
The |NamedType| object represents a single name and ASN.1 type of a constructed ASN.1 type.
|NamedType| objects are immutable and duck-type Python :class:`tuple` objects
holding *name* and *asn1Object* components.
Parameters
----------
name: :py:class:`str`
Field name
asn1Object:
ASN.1 type object
"""
isOptional = False
isDefaulted = False
def __init__(self, name, asn1Object, openType=None):
self.__name = name
self.__type = asn1Object
self.__nameAndType = name, asn1Object
self.__openType = openType
def __repr__(self):
representation = '%s=%r' % (self.name, self.asn1Object)
if self.openType:
representation += ' openType: %r' % self.openType
return '<%s object at 0x%x type %s>' % (self.__class__.__name__, id(self), representation)
def __eq__(self, other):
return self.__nameAndType == other
def __ne__(self, other):
return self.__nameAndType != other
def __lt__(self, other):
return self.__nameAndType < other
def __le__(self, other):
return self.__nameAndType <= other
def __gt__(self, other):
return self.__nameAndType > other
def __ge__(self, other):
return self.__nameAndType >= other
def __hash__(self):
return hash(self.__nameAndType)
def __getitem__(self, idx):
return self.__nameAndType[idx]
def __iter__(self):
return iter(self.__nameAndType)
@property
def name(self):
return self.__name
@property
def asn1Object(self):
return self.__type
@property
def openType(self):
return self.__openType
# Backward compatibility
def getName(self):
return self.name
def getType(self):
return self.asn1Object
class OptionalNamedType(NamedType):
__doc__ = NamedType.__doc__
isOptional = True
class DefaultedNamedType(NamedType):
__doc__ = NamedType.__doc__
isDefaulted = True
class NamedTypes(object):
"""Create a collection of named fields for a constructed ASN.1 type.
The NamedTypes object represents a collection of named fields of a constructed ASN.1 type.
*NamedTypes* objects are immutable and duck-type Python :class:`dict` objects
holding *name* as keys and ASN.1 type object as values.
Parameters
----------
*namedTypes: :class:`~pyasn1.type.namedtype.NamedType`
Examples
--------
.. code-block:: python
class Description(Sequence):
'''
ASN.1 specification:
Description ::= SEQUENCE {
surname IA5String,
first-name IA5String OPTIONAL,
age INTEGER DEFAULT 40
}
'''
componentType = NamedTypes(
NamedType('surname', IA5String()),
OptionalNamedType('first-name', IA5String()),
DefaultedNamedType('age', Integer(40))
)
descr = Description()
descr['surname'] = 'Smith'
descr['first-name'] = 'John'
"""
def __init__(self, *namedTypes, **kwargs):
self.__namedTypes = namedTypes
self.__namedTypesLen = len(self.__namedTypes)
self.__minTagSet = self.__computeMinTagSet()
self.__nameToPosMap = self.__computeNameToPosMap()
self.__tagToPosMap = self.__computeTagToPosMap()
self.__ambiguousTypes = 'terminal' not in kwargs and self.__computeAmbiguousTypes() or {}
self.__uniqueTagMap = self.__computeTagMaps(unique=True)
self.__nonUniqueTagMap = self.__computeTagMaps(unique=False)
self.__hasOptionalOrDefault = any([True for namedType in self.__namedTypes
if namedType.isDefaulted or namedType.isOptional])
self.__hasOpenTypes = any([True for namedType in self.__namedTypes
if namedType.openType])
self.__requiredComponents = frozenset(
[idx for idx, nt in enumerate(self.__namedTypes) if not nt.isOptional and not nt.isDefaulted]
)
self.__keys = frozenset([namedType.name for namedType in self.__namedTypes])
self.__values = tuple([namedType.asn1Object for namedType in self.__namedTypes])
self.__items = tuple([(namedType.name, namedType.asn1Object) for namedType in self.__namedTypes])
def __repr__(self):
representation = ', '.join(['%r' % x for x in self.__namedTypes])
return '<%s object at 0x%x types %s>' % (self.__class__.__name__, id(self), representation)
def __eq__(self, other):
return self.__namedTypes == other
def __ne__(self, other):
return self.__namedTypes != other
def __lt__(self, other):
return self.__namedTypes < other
def __le__(self, other):
return self.__namedTypes <= other
def __gt__(self, other):
return self.__namedTypes > other
def __ge__(self, other):
return self.__namedTypes >= other
def __hash__(self):
return hash(self.__namedTypes)
def __getitem__(self, idx):
try:
return self.__namedTypes[idx]
except TypeError:
return self.__namedTypes[self.__nameToPosMap[idx]]
def __contains__(self, key):
return key in self.__nameToPosMap
def __iter__(self):
return (x[0] for x in self.__namedTypes)
if sys.version_info[0] <= 2:
def __nonzero__(self):
return self.__namedTypesLen > 0
else:
def __bool__(self):
return self.__namedTypesLen > 0
def __len__(self):
return self.__namedTypesLen
# Python dict protocol
def values(self):
return self.__values
def keys(self):
return self.__keys
def items(self):
return self.__items
def clone(self):
return self.__class__(*self.__namedTypes)
class PostponedError(object):
def __init__(self, errorMsg):
self.__errorMsg = errorMsg
def __getitem__(self, item):
raise error.PyAsn1Error(self.__errorMsg)
def __computeTagToPosMap(self):
tagToPosMap = {}
for idx, namedType in enumerate(self.__namedTypes):
tagMap = namedType.asn1Object.tagMap
if isinstance(tagMap, NamedTypes.PostponedError):
return tagMap
if not tagMap:
continue
for _tagSet in tagMap.presentTypes:
if _tagSet in tagToPosMap:
return NamedTypes.PostponedError('Duplicate component tag %s at %s' % (_tagSet, namedType))
tagToPosMap[_tagSet] = idx
return tagToPosMap
def __computeNameToPosMap(self):
nameToPosMap = {}
for idx, namedType in enumerate(self.__namedTypes):
if namedType.name in nameToPosMap:
return NamedTypes.PostponedError('Duplicate component name %s at %s' % (namedType.name, namedType))
nameToPosMap[namedType.name] = idx
return nameToPosMap
def __computeAmbiguousTypes(self):
ambigiousTypes = {}
partialAmbigiousTypes = ()
for idx, namedType in reversed(tuple(enumerate(self.__namedTypes))):
if namedType.isOptional or namedType.isDefaulted:
partialAmbigiousTypes = (namedType,) + partialAmbigiousTypes
else:
partialAmbigiousTypes = (namedType,)
if len(partialAmbigiousTypes) == len(self.__namedTypes):
ambigiousTypes[idx] = self
else:
ambigiousTypes[idx] = NamedTypes(*partialAmbigiousTypes, **dict(terminal=True))
return ambigiousTypes
def getTypeByPosition(self, idx):
"""Return ASN.1 type object by its position in fields set.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
:
ASN.1 type
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range
"""
try:
return self.__namedTypes[idx].asn1Object
except IndexError:
raise error.PyAsn1Error('Type position out of range')
def getPositionByType(self, tagSet):
"""Return field position by its ASN.1 type.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
ASN.1 tag set distinguishing one ASN.1 type from others.
Returns
-------
: :py:class:`int`
ASN.1 type position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *tagSet* is not present or ASN.1 types are not unique within callee *NamedTypes*
"""
try:
return self.__tagToPosMap[tagSet]
except KeyError:
raise error.PyAsn1Error('Type %s not found' % (tagSet,))
def getNameByPosition(self, idx):
"""Return field name by its position in fields set.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
: :py:class:`str`
Field name
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range
"""
try:
return self.__namedTypes[idx].name
except IndexError:
raise error.PyAsn1Error('Type position out of range')
def getPositionByName(self, name):
"""Return field position by filed name.
Parameters
----------
name: :py:class:`str`
Field name
Returns
-------
: :py:class:`int`
Field position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *name* is not present or not unique within callee *NamedTypes*
"""
try:
return self.__nameToPosMap[name]
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
def getTagMapNearPosition(self, idx):
"""Return ASN.1 types that are allowed at or past given field position.
Some ASN.1 serialisation protocols allow for skipping optional and defaulted fields.
Some constructed ASN.1 types allow reordering of the fields. When recovering
such objects it may be important to know which types can possibly be
present at any given position in the field sets.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
: :class:`~pyasn1.type.tagmap.TagMap`
Map of ASN.1 types allowed at given field position
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range
"""
try:
return self.__ambiguousTypes[idx].tagMap
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def getPositionNearType(self, tagSet, idx):
"""Return the closest field position where given ASN.1 type is allowed.
Some ASN.1 serialisation protocols allow for skipping optional and defaulted fields.
Some constructed ASN.1 types allow reordering of the fields. When recovering
such objects it may be important to know at which field position, in field set,
given *tagSet* is allowed at or past *idx* position.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
ASN.1 type which field position to look up
idx: :py:class:`int`
Field position at or past which to perform ASN.1 type look up
Returns
-------
: :py:class:`int`
Field position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *tagSet* is not present or not unique within callee *NamedTypes*
or *idx* is out of fields range
"""
try:
return idx + self.__ambiguousTypes[idx].getPositionByType(tagSet)
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def __computeMinTagSet(self):
minTagSet = None
for namedType in self.__namedTypes:
asn1Object = namedType.asn1Object
try:
tagSet = asn1Object.minTagSet
except AttributeError:
tagSet = asn1Object.tagSet
if minTagSet is None or tagSet < minTagSet:
minTagSet = tagSet
return minTagSet or tag.TagSet()
@property
def minTagSet(self):
"""Return the minimal TagSet among ASN.1 type in callee *NamedTypes*.
Some ASN.1 types/serialisation protocols require ASN.1 types to be
arranged based on their numerical tag value. The *minTagSet* property
returns that.
Returns
-------
: :class:`~pyasn1.type.tagset.TagSet`
Minimal TagSet among ASN.1 types in callee *NamedTypes*
"""
return self.__minTagSet
def __computeTagMaps(self, unique):
presentTypes = {}
skipTypes = {}
defaultType = None
for namedType in self.__namedTypes:
tagMap = namedType.asn1Object.tagMap
if isinstance(tagMap, NamedTypes.PostponedError):
return tagMap
for tagSet in tagMap:
if unique and tagSet in presentTypes:
return NamedTypes.PostponedError('Non-unique tagSet %s of %s at %s' % (tagSet, namedType, self))
presentTypes[tagSet] = namedType.asn1Object
skipTypes.update(tagMap.skipTypes)
if defaultType is None:
defaultType = tagMap.defaultType
elif tagMap.defaultType is not None:
return NamedTypes.PostponedError('Duplicate default ASN.1 type at %s' % (self,))
return tagmap.TagMap(presentTypes, skipTypes, defaultType)
@property
def tagMap(self):
"""Return a *TagMap* object from tags and types recursively.
Return a :class:`~pyasn1.type.tagmap.TagMap` object by
combining tags from *TagMap* objects of children types and
associating them with their immediate child type.
Example
-------
.. code-block:: python
OuterType ::= CHOICE {
innerType INTEGER
}
Calling *.tagMap* on *OuterType* will yield a map like this:
.. code-block:: python
Integer.tagSet -> Choice
"""
return self.__nonUniqueTagMap
@property
def tagMapUnique(self):
"""Return a *TagMap* object from unique tags and types recursively.
Return a :class:`~pyasn1.type.tagmap.TagMap` object by
combining tags from *TagMap* objects of children types and
associating them with their immediate child type.
Example
-------
.. code-block:: python
OuterType ::= CHOICE {
innerType INTEGER
}
Calling *.tagMapUnique* on *OuterType* will yield a map like this:
.. code-block:: python
Integer.tagSet -> Choice
Note
----
Duplicate *TagSet* objects found in the tree of children
types would cause error.
"""
return self.__uniqueTagMap
@property
def hasOptionalOrDefault(self):
return self.__hasOptionalOrDefault
@property
def hasOpenTypes(self):
return self.__hasOpenTypes
@property
def namedTypes(self):
return tuple(self.__namedTypes)
@property
def requiredComponents(self):
return self.__requiredComponents
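# Editor's sketch (not part of the original module): a minimal exercise of
# the NamedTypes lookup API above, assuming the sibling pyasn1.type.univ
# module for the component types.
if __name__ == '__main__':
    from pyasn1.type import univ
    fields = NamedTypes(
        NamedType('id', univ.Integer()),
        OptionalNamedType('note', univ.OctetString())
    )
    assert fields.getPositionByName('note') == 1
    assert fields.getNameByPosition(0) == 'id'
    assert fields.hasOptionalOrDefault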
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/type/namedtype.py
| 0.605099 | 0.297551 |
namedtype.py
|
pypi
|
from pyasn1 import error
__all__ = ['NamedValues']
class NamedValues(object):
"""Create named values object.
The |NamedValues| object represents a collection of string names
associated with numeric IDs. These objects are used for giving
names to otherwise numerical values.
|NamedValues| objects are immutable and duck-type Python
:class:`dict` objects mapping ID to name and vice-versa.
Parameters
----------
\*args: variable number of two-element :py:class:`tuple`
name: :py:class:`str`
Value label
value: :py:class:`int`
Numeric value
Keyword Args
------------
name: :py:class:`str`
Value label
value: :py:class:`int`
Numeric value
Examples
--------
.. code-block:: pycon
>>> nv = NamedValues('a', 'b', ('c', 0), d=1)
>>> nv
{'c': 0, 'd': 1, 'a': 2, 'b': 3}
>>> nv[0]
'c'
>>> nv['a']
2
"""
def __init__(self, *args, **kwargs):
self.__names = {}
self.__numbers = {}
anonymousNames = []
for namedValue in args:
if isinstance(namedValue, (tuple, list)):
try:
name, number = namedValue
except ValueError:
raise error.PyAsn1Error('Not a proper attribute-value pair %r' % (namedValue,))
else:
anonymousNames.append(namedValue)
continue
if name in self.__names:
raise error.PyAsn1Error('Duplicate name %s' % (name,))
if number in self.__numbers:
raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
self.__names[name] = number
self.__numbers[number] = name
for name, number in kwargs.items():
if name in self.__names:
raise error.PyAsn1Error('Duplicate name %s' % (name,))
if number in self.__numbers:
raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
self.__names[name] = number
self.__numbers[number] = name
if anonymousNames:
number = self.__numbers and max(self.__numbers) + 1 or 0
for name in anonymousNames:
if name in self.__names:
raise error.PyAsn1Error('Duplicate name %s' % (name,))
self.__names[name] = number
self.__numbers[number] = name
number += 1
def __repr__(self):
representation = ', '.join(['%s=%d' % x for x in self.items()])
if len(representation) > 64:
representation = representation[:32] + '...' + representation[-32:]
return '<%s object 0x%x enums %s>' % (self.__class__.__name__, id(self), representation)
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __hash__(self):
return hash(self.items())
# Python dict protocol (read-only)
def __getitem__(self, key):
try:
return self.__numbers[key]
except KeyError:
return self.__names[key]
def __len__(self):
return len(self.__names)
def __contains__(self, key):
return key in self.__names or key in self.__numbers
def __iter__(self):
return iter(self.__names)
def values(self):
return iter(self.__numbers)
def keys(self):
return iter(self.__names)
def items(self):
for name in self.__names:
yield name, self.__names[name]
# support merging
def __add__(self, namedValues):
return self.__class__(*tuple(self.items()) + tuple(namedValues.items()))
# XXX clone/subtype?
def clone(self, *args, **kwargs):
new = self.__class__(*args, **kwargs)
return self + new
# legacy protocol
def getName(self, value):
if value in self.__numbers:
return self.__numbers[value]
def getValue(self, name):
if name in self.__names:
return self.__names[name]
def getValues(self, *names):
try:
return [self.__names[name] for name in names]
except KeyError:
raise error.PyAsn1Error(
'Unknown bit identifier(s): %s' % (set(names).difference(self.__names),)
)
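# Editor's sketch (not part of the original module): NamedValues offers
# symmetric name<->number lookups and supports merging via '+'.
if __name__ == '__main__':
    states = NamedValues(('up', 1), ('down', 2))
    assert states['up'] == 1       # name -> number
    assert states[2] == 'down'     # number -> name
    merged = states + NamedValues(('testing', 3))
    assert merged[3] == 'testing'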
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/type/namedval.py
| 0.895802 | 0.485234 |
namedval.py
|
pypi
|
from pyasn1 import error
__all__ = ['TagMap']
class TagMap(object):
"""Map *TagSet* objects to ASN.1 types
Create an object mapping *TagSet* object to ASN.1 type.
*TagMap* objects are immutable and duck-type read-only Python
:class:`dict` objects holding *TagSet* objects as keys and ASN.1
type objects as values.
Parameters
----------
presentTypes: :py:class:`dict`
Map of :class:`~pyasn1.type.tag.TagSet` to ASN.1 objects considered
as being unconditionally present in the *TagMap*.
skipTypes: :py:class:`dict`
A collection of :class:`~pyasn1.type.tag.TagSet` objects considered
as absent in the *TagMap* even when *defaultType* is present.
defaultType: ASN.1 type object
An ASN.1 type object that the callee *TagMap* returns for any *TagSet* key not present
in *presentTypes* (unless the given key is present in *skipTypes*).
"""
def __init__(self, presentTypes=None, skipTypes=None, defaultType=None):
self.__presentTypes = presentTypes or {}
self.__skipTypes = skipTypes or {}
self.__defaultType = defaultType
def __contains__(self, tagSet):
return (tagSet in self.__presentTypes or
self.__defaultType is not None and tagSet not in self.__skipTypes)
def __getitem__(self, tagSet):
try:
return self.__presentTypes[tagSet]
except KeyError:
if self.__defaultType is None:
raise KeyError()
elif tagSet in self.__skipTypes:
raise error.PyAsn1Error('Key in negative map')
else:
return self.__defaultType
def __iter__(self):
return iter(self.__presentTypes)
def __repr__(self):
representation = '%s object at 0x%x' % (self.__class__.__name__, id(self))
if self.__presentTypes:
representation += ' present %s' % repr(self.__presentTypes)
if self.__skipTypes:
representation += ' skip %s' % repr(self.__skipTypes)
if self.__defaultType is not None:
representation += ' default %s' % repr(self.__defaultType)
return '<%s>' % representation
@property
def presentTypes(self):
"""Return *TagSet* to ASN.1 type map present in callee *TagMap*"""
return self.__presentTypes
@property
def skipTypes(self):
"""Return *TagSet* collection unconditionally absent in callee *TagMap*"""
return self.__skipTypes
@property
def defaultType(self):
"""Return default ASN.1 type being returned for any missing *TagSet*"""
return self.__defaultType
# Backward compatibility
def getPosMap(self):
return self.presentTypes
def getNegMap(self):
return self.skipTypes
def getDef(self):
return self.defaultType
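# Editor's sketch (not part of the original module): a TagMap resolving a
# TagSet key back to its ASN.1 type, assuming pyasn1.type.univ is available.
if __name__ == '__main__':
    from pyasn1.type import univ
    integer = univ.Integer()
    presentMap = TagMap(presentTypes={integer.tagSet: integer})
    assert integer.tagSet in presentMap
    assert presentMap[integer.tagSet] is integer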
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/type/tagmap.py
| 0.835785 | 0.343548 |
tagmap.py
|
pypi
|
import sys
from pyasn1.type import error
__all__ = ['SingleValueConstraint', 'ContainedSubtypeConstraint',
'ValueRangeConstraint', 'ValueSizeConstraint',
'PermittedAlphabetConstraint', 'InnerTypeConstraint',
'ConstraintsExclusion', 'ConstraintsIntersection',
'ConstraintsUnion']
class AbstractConstraint(object):
def __init__(self, *values):
self._valueMap = set()
self._setValues(values)
self.__hash = hash((self.__class__.__name__, self._values))
def __call__(self, value, idx=None):
if not self._values:
return
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: %r' % (self, sys.exc_info()[1])
)
def __repr__(self):
representation = '%s object at 0x%x' % (self.__class__.__name__, id(self))
if self._values:
representation += ' consts %s' % ', '.join([repr(x) for x in self._values])
return '<%s>' % representation
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other):
return self._values != other
def __lt__(self, other):
return self._values < other
def __le__(self, other):
return self._values <= other
def __gt__(self, other):
return self._values > other
def __ge__(self, other):
return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self):
return self._values and True or False
else:
def __bool__(self):
return self._values and True or False
def __hash__(self):
return self.__hash
def _setValues(self, values):
self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self):
return self._valueMap
def isSuperTypeOf(self, otherConstraint):
# TODO: fix possible comparison of set vs scalars here
return (otherConstraint is self or
not self._values or
otherConstraint == self or
self in otherConstraint.getValueMap())
def isSubTypeOf(self, otherConstraint):
return (otherConstraint is self or
not self or
otherConstraint == self or
otherConstraint in self._valueMap)
class SingleValueConstraint(AbstractConstraint):
"""Create a SingleValueConstraint object.
The SingleValueConstraint satisfies any value that
is present in the set of permitted values.
The SingleValueConstraint object can be applied to
any ASN.1 type.
Parameters
----------
\*values: :class:`int`
Full set of values permitted by this constraint object.
Examples
--------
.. code-block:: python
class DivisorOfSix(Integer):
'''
ASN.1 specification:
Divisor-Of-6 ::= INTEGER (1 | 2 | 3 | 6)
'''
subtypeSpec = SingleValueConstraint(1, 2, 3, 6)
# this will succeed
divisor_of_six = DivisorOfSix(1)
# this will raise ValueConstraintError
divisor_of_six = DivisorOfSix(7)
"""
def _setValues(self, values):
self._values = values
self._set = set(values)
def _testValue(self, value, idx):
if value not in self._set:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Create a ContainedSubtypeConstraint object.
The ContainedSubtypeConstraint satisfies any value that
is present in the set of permitted values and also
satisfies included constraints.
The ContainedSubtypeConstraint object can be applied to
any ASN.1 type.
Parameters
----------
\*values:
Full set of values and constraint objects permitted
by this constraint object.
Examples
--------
.. code-block:: python
class DivisorOfEighteen(Integer):
'''
ASN.1 specification:
Divisors-of-18 ::= INTEGER (INCLUDES Divisors-of-6 | 9 | 18)
'''
subtypeSpec = ContainedSubtypeConstraint(
SingleValueConstraint(1, 2, 3, 6), 9, 18
)
# this will succeed
divisor_of_eighteen = DivisorOfEighteen(9)
# this will raise ValueConstraintError
divisor_of_eighteen = DivisorOfEighteen(10)
"""
def _testValue(self, value, idx):
for constraint in self._values:
if isinstance(constraint, AbstractConstraint):
constraint(value, idx)
elif value not in self._set:
raise error.ValueConstraintError(value)
class ValueRangeConstraint(AbstractConstraint):
"""Create a ValueRangeConstraint object.
The ValueRangeConstraint satisfies any value that
falls in the range of permitted values.
The ValueRangeConstraint object can only be applied
to :class:`~pyasn1.type.univ.Integer` and
:class:`~pyasn1.type.univ.Real` types.
Parameters
----------
start: :class:`int`
Minimum permitted value in the range (inclusive)
end: :class:`int`
Maximum permitted value in the range (inclusive)
Examples
--------
.. code-block:: python
class TeenAgeYears(Integer):
'''
ASN.1 specification:
TeenAgeYears ::= INTEGER (13 .. 19)
'''
subtypeSpec = ValueRangeConstraint(13, 19)
# this will succeed
teen_year = TeenAgeYears(18)
# this will raise ValueConstraintError
teen_year = TeenAgeYears(20)
"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: invalid constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""Create a ValueSizeConstraint object.
The ValueSizeConstraint satisfies any value for
as long as its size falls within the range of
permitted sizes.
The ValueSizeConstraint object can be applied
to :class:`~pyasn1.type.univ.BitString`,
:class:`~pyasn1.type.univ.OctetString` (including
all :ref:`character ASN.1 types <type.char>`),
:class:`~pyasn1.type.univ.SequenceOf`
and :class:`~pyasn1.type.univ.SetOf` types.
Parameters
----------
minimum: :class:`int`
Minimum permitted size of the value (inclusive)
maximum: :class:`int`
Maximum permitted size of the value (inclusive)
Examples
--------
.. code-block:: python
class BaseballTeamRoster(SetOf):
'''
ASN.1 specification:
BaseballTeamRoster ::= SET SIZE (1..25) OF PlayerNames
'''
componentType = PlayerNames()
subtypeSpec = ValueSizeConstraint(1, 25)
# this will succeed
team = BaseballTeamRoster()
team.extend(['Jan', 'Matej'])
encode(team)
# this will raise ValueConstraintError
team = BaseballTeamRoster()
team.extend(['Jan'] * 26)
encode(team)
Note
----
Whenever ValueSizeConstraint is applied to mutable types
(e.g. :class:`~pyasn1.type.univ.SequenceOf`,
:class:`~pyasn1.type.univ.SetOf`), constraint
validation only happens at the serialisation phase rather
than schema instantiation phase (as it is with immutable
types).
"""
def _testValue(self, value, idx):
valueSize = len(value)
if valueSize < self.start or valueSize > self.stop:
raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
"""Create a PermittedAlphabetConstraint object.
The PermittedAlphabetConstraint satisfies any character
string for as long as all its characters are present in
the set of permitted characters.
The PermittedAlphabetConstraint object can only be applied
to the :ref:`character ASN.1 types <type.char>` such as
:class:`~pyasn1.type.char.IA5String`.
Parameters
----------
\*alphabet: :class:`str`
Full set of characters permitted by this constraint object.
Examples
--------
.. code-block:: python
class BooleanValue(IA5String):
'''
ASN.1 specification:
BooleanValue ::= IA5String (FROM ('T' | 'F'))
'''
subtypeSpec = PermittedAlphabetConstraint('T', 'F')
# this will succeed
truth = BooleanValue('T')
truth = BooleanValue('TF')
# this will raise ValueConstraintError
garbage = BooleanValue('TAF')
"""
def _setValues(self, values):
self._values = values
self._set = set(values)
def _testValue(self, value, idx):
if not self._set.issuperset(value):
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
"""Value must satisfy the type and presence constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
if status == 'ABSENT':  # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Logic operations on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Create a ConstraintsExclusion logic operator object.
The ConstraintsExclusion logic operator succeeds when the
value does *not* satisfy the operand constraint.
The ConstraintsExclusion object can be applied to
any constraint and logic operator object.
Parameters
----------
constraint:
Constraint or logic operator object.
Examples
--------
.. code-block:: python
class Lipogramme(IA5STRING):
'''
ASN.1 specification:
Lipogramme ::=
IA5String (FROM (ALL EXCEPT ("e"|"E")))
'''
subtypeSpec = ConstraintsExclusion(
PermittedAlphabetConstraint('e', 'E')
)
# this will succeed
lipogramme = Lipogramme('A work of fiction?')
# this will raise ValueConstraintError
lipogramme = Lipogramme('Eel')
Warning
-------
The above example involving PermittedAlphabetConstraint might
not work due to the way PermittedAlphabetConstraint is implemented.
Other constraints should work with ConstraintsExclusion,
though.
"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
def __getitem__(self, idx):
return self._values[idx]
def __iter__(self):
return iter(self._values)
def __add__(self, value):
return self.__class__(*(self._values + (value,)))
def __radd__(self, value):
return self.__class__(*((value,) + self._values))
def __len__(self):
return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for constraint in values:
if constraint:
self._valueMap.add(constraint)
self._valueMap.update(constraint.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Create a ConstraintsIntersection logic operator object.
The ConstraintsIntersection logic operator only succeeds
if *all* its operands succeed.
The ConstraintsIntersection object can be applied to
any constraint and logic operator objects.
The ConstraintsIntersection object duck-types the immutable
container object like Python :py:class:`tuple`.
Parameters
----------
\*constraints:
Constraint or logic operator objects.
Examples
--------
.. code-block:: python
class CapitalAndSmall(IA5String):
'''
ASN.1 specification:
CapitalAndSmall ::=
IA5String (FROM ("A".."Z"|"a".."z"))
'''
subtypeSpec = ConstraintsIntersection(
PermittedAlphabetConstraint('A', 'Z'),
PermittedAlphabetConstraint('a', 'z')
)
# this will succeed
capital_and_small = CapitalAndSmall('Hello')
# this will raise ValueConstraintError
capital_and_small = CapitalAndSmall('hello')
"""
def _testValue(self, value, idx):
for constraint in self._values:
constraint(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Create a ConstraintsUnion logic operator object.
The ConstraintsUnion logic operator only succeeds if
*at least a single* operand succeeds.
The ConstraintsUnion object can be applied to
any constraint and logic operator objects.
The ConstraintsUnion object duck-types the immutable
container object like Python :py:class:`tuple`.
Parameters
----------
\*constraints:
Constraint or logic operator objects.
Examples
--------
.. code-block:: python
class CapitalOrSmall(IA5String):
'''
ASN.1 specification:
CapitalOrSmall ::=
IA5String (FROM ("A".."Z") | FROM ("a".."z"))
'''
subtypeSpec = ConstraintsUnion(
PermittedAlphabetConstraint('A', 'Z'),
PermittedAlphabetConstraint('a', 'z')
)
# this will succeed
capital_or_small = CapitalOrSmall('Hello')
# this will raise ValueConstraintError
capital_or_small = CapitalOrSmall('hello!')
"""
def _testValue(self, value, idx):
for constraint in self._values:
try:
constraint(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for "%s"' % (self._values, value)
)
# TODO:
# refactor InnerTypeConstraint
# add tests for type check
# implement other constraint types
# make constraint validation easy to skip
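# Editor's sketch (not part of the original module): composing the constraint
# operators above the way a subtypeSpec declaration would.
if __name__ == '__main__':
    spec = ConstraintsIntersection(
        ValueRangeConstraint(0, 100),
        ConstraintsExclusion(SingleValueConstraint(13))
    )
    spec(42)       # in range and not excluded -> passes silently
    try:
        spec(13)   # excluded by the inner constraint
    except error.ValueConstraintError:
        pass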
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/type/constraint.py
| 0.639961 | 0.272442 |
constraint.py
|
pypi
|
from pyasn1 import error
__all__ = ['tagClassUniversal', 'tagClassApplication', 'tagClassContext',
'tagClassPrivate', 'tagFormatSimple', 'tagFormatConstructed',
'tagCategoryImplicit', 'tagCategoryExplicit',
'tagCategoryUntagged', 'Tag', 'TagSet']
#: Identifier for ASN.1 class UNIVERSAL
tagClassUniversal = 0x00
#: Identifier for ASN.1 class APPLICATION
tagClassApplication = 0x40
#: Identifier for ASN.1 class context-specific
tagClassContext = 0x80
#: Identifier for ASN.1 class private
tagClassPrivate = 0xC0
#: Identifier for "simple" ASN.1 structure (e.g. scalar)
tagFormatSimple = 0x00
#: Identifier for "constructed" ASN.1 structure (e.g. may have inner components)
tagFormatConstructed = 0x20
tagCategoryImplicit = 0x01
tagCategoryExplicit = 0x02
tagCategoryUntagged = 0x04
class Tag(object):
"""Create ASN.1 tag
Represents an ASN.1 tag that can be attached to an ASN.1 type to make
types distinguishable from each other.
*Tag* objects are immutable and duck-type Python :class:`tuple` objects
holding three integer components of a tag.
Parameters
----------
tagClass: :py:class:`int`
Tag *class* value
tagFormat: :py:class:`int`
Tag *format* value
tagId: :py:class:`int`
Tag ID value
"""
def __init__(self, tagClass, tagFormat, tagId):
if tagId < 0:
raise error.PyAsn1Error('Negative tag ID (%s) not allowed' % tagId)
self.__tagClass = tagClass
self.__tagFormat = tagFormat
self.__tagId = tagId
self.__tagClassId = tagClass, tagId
self.__hash = hash(self.__tagClassId)
def __repr__(self):
representation = '[%s:%s:%s]' % (self.__tagClass, self.__tagFormat, self.__tagId)
return '<%s object at 0x%x tag %s>' % (self.__class__.__name__, id(self), representation)
def __eq__(self, other):
return self.__tagClassId == other
def __ne__(self, other):
return self.__tagClassId != other
def __lt__(self, other):
return self.__tagClassId < other
def __le__(self, other):
return self.__tagClassId <= other
def __gt__(self, other):
return self.__tagClassId > other
def __ge__(self, other):
return self.__tagClassId >= other
def __hash__(self):
return self.__hash
def __getitem__(self, idx):
if idx == 0:
return self.__tagClass
elif idx == 1:
return self.__tagFormat
elif idx == 2:
return self.__tagId
else:
raise IndexError()
def __iter__(self):
yield self.__tagClass
yield self.__tagFormat
yield self.__tagId
def __and__(self, otherTag):
return self.__class__(self.__tagClass & otherTag.tagClass,
self.__tagFormat & otherTag.tagFormat,
self.__tagId & otherTag.tagId)
def __or__(self, otherTag):
return self.__class__(self.__tagClass | otherTag.tagClass,
self.__tagFormat | otherTag.tagFormat,
self.__tagId | otherTag.tagId)
@property
def tagClass(self):
"""ASN.1 tag class
Returns
-------
: :py:class:`int`
Tag class
"""
return self.__tagClass
@property
def tagFormat(self):
"""ASN.1 tag format
Returns
-------
: :py:class:`int`
Tag format
"""
return self.__tagFormat
@property
def tagId(self):
"""ASN.1 tag ID
Returns
-------
: :py:class:`int`
Tag ID
"""
return self.__tagId
class TagSet(object):
"""Create a collection of ASN.1 tags
Represents a combination of :class:`~pyasn1.type.tag.Tag` objects
that can be attached to an ASN.1 type to make types distinguishable
from each other.
*TagSet* objects are immutable and duck-type Python :class:`tuple` objects
holding arbitrary number of :class:`~pyasn1.type.tag.Tag` objects.
Parameters
----------
baseTag: :class:`~pyasn1.type.tag.Tag`
Base *Tag* object. This tag survives IMPLICIT tagging.
*superTags: :class:`~pyasn1.type.tag.Tag`
Additional *Tag* objects taking part in subtyping.
Examples
--------
.. code-block:: python
class OrderNumber(NumericString):
'''
ASN.1 specification
Order-number ::=
[APPLICATION 5] IMPLICIT NumericString
'''
tagSet = NumericString.tagSet.tagImplicitly(
Tag(tagClassApplication, tagFormatSimple, 5)
)
orderNumber = OrderNumber('1234')
"""
def __init__(self, baseTag=(), *superTags):
self.__baseTag = baseTag
self.__superTags = superTags
self.__superTagsClassId = tuple(
[(superTag.tagClass, superTag.tagId) for superTag in superTags]
)
self.__lenOfSuperTags = len(superTags)
self.__hash = hash(self.__superTagsClassId)
def __repr__(self):
representation = '-'.join(['%s:%s:%s' % (x.tagClass, x.tagFormat, x.tagId)
for x in self.__superTags])
if representation:
representation = 'tags ' + representation
else:
representation = 'untagged'
return '<%s object at 0x%x %s>' % (self.__class__.__name__, id(self), representation)
def __add__(self, superTag):
return self.__class__(self.__baseTag, *self.__superTags + (superTag,))
def __radd__(self, superTag):
return self.__class__(self.__baseTag, *(superTag,) + self.__superTags)
def __getitem__(self, i):
if i.__class__ is slice:
return self.__class__(self.__baseTag, *self.__superTags[i])
else:
return self.__superTags[i]
def __eq__(self, other):
return self.__superTagsClassId == other
def __ne__(self, other):
return self.__superTagsClassId != other
def __lt__(self, other):
return self.__superTagsClassId < other
def __le__(self, other):
return self.__superTagsClassId <= other
def __gt__(self, other):
return self.__superTagsClassId > other
def __ge__(self, other):
return self.__superTagsClassId >= other
def __hash__(self):
return self.__hash
def __len__(self):
return self.__lenOfSuperTags
@property
def baseTag(self):
"""Return base ASN.1 tag
Returns
-------
: :class:`~pyasn1.type.tag.Tag`
Base tag of this *TagSet*
"""
return self.__baseTag
@property
def superTags(self):
"""Return ASN.1 tags
Returns
-------
: :py:class:`tuple`
Tuple of :class:`~pyasn1.type.tag.Tag` objects that this *TagSet* contains
"""
return self.__superTags
def tagExplicitly(self, superTag):
"""Return explicitly tagged *TagSet*
Create a new *TagSet* representing callee *TagSet* explicitly tagged
with passed tag(s). With explicit tagging mode, new tags are appended
to existing tag(s).
Parameters
----------
superTag: :class:`~pyasn1.type.tag.Tag`
*Tag* object to tag this *TagSet*
Returns
-------
: :class:`~pyasn1.type.tag.TagSet`
New *TagSet* object
"""
if superTag.tagClass == tagClassUniversal:
raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag")
if superTag.tagFormat != tagFormatConstructed:
superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId)
return self + superTag
def tagImplicitly(self, superTag):
"""Return implicitly tagged *TagSet*
Create a new *TagSet* representing callee *TagSet* implicitly tagged
with passed tag(s). With implicit tagging mode, new tag(s) replace the
last existing tag.
Parameters
----------
superTag: :class:`~pyasn1.type.tag.Tag`
*Tag* object to tag this *TagSet*
Returns
-------
: :class:`~pyasn1.type.tag.TagSet`
New *TagSet* object
"""
if self.__superTags:
superTag = Tag(superTag.tagClass, self.__superTags[-1].tagFormat, superTag.tagId)
return self[:-1] + superTag
def isSuperTagSetOf(self, tagSet):
"""Test type relationship against given *TagSet*
The callee is considered to be a supertype of given *TagSet*
tag-wise if all tags in *TagSet* are present in the callee and
they are in the same order.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
*TagSet* object to evaluate against the callee
Returns
-------
: :py:class:`bool`
`True` if callee is a supertype of *tagSet*
"""
if len(tagSet) < self.__lenOfSuperTags:
return False
return self.__superTags == tagSet[:self.__lenOfSuperTags]
# Backward compatibility
def getBaseTag(self):
return self.__baseTag
def initTagSet(tag):
return TagSet(tag, tag)
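# Editor's sketch (not part of the original module): implicit tagging replaces
# the outermost tag while explicit tagging appends a new one.
if __name__ == '__main__':
    base = initTagSet(Tag(tagClassUniversal, tagFormatSimple, 2))
    implicit = base.tagImplicitly(Tag(tagClassContext, tagFormatSimple, 0))
    explicit = base.tagExplicitly(Tag(tagClassContext, tagFormatSimple, 0))
    assert len(implicit) == 1
    assert len(explicit) == 2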
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/type/tag.py
| 0.845273 | 0.184437 |
tag.py
|
pypi
|
import datetime
from pyasn1 import error
from pyasn1.compat import dateandtime
from pyasn1.compat import string
from pyasn1.type import char
from pyasn1.type import tag
from pyasn1.type import univ
__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
NoValue = univ.NoValue
noValue = univ.noValue
class ObjectDescriptor(char.GraphicString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.GraphicString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
)
# Optimization for faster codec lookup
typeId = char.GraphicString.getTypeId()
class TimeMixIn(object):
_yearsDigits = 4
_hasSubsecond = False
_optionalMinutes = False
_shortTZ = False
class FixedOffset(datetime.tzinfo):
"""Fixed offset in minutes east from UTC."""
# defaulted arguments required
# https://docs.python.org/2.3/lib/datetime-tzinfo.html
def __init__(self, offset=0, name='UTC'):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return datetime.timedelta(0)
UTC = FixedOffset()
@property
def asDateTime(self):
"""Create :py:class:`datetime.datetime` object from a |ASN.1| object.
Returns
-------
:
new instance of :py:class:`datetime.datetime` object
"""
text = str(self)
if text.endswith('Z'):
tzinfo = TimeMixIn.UTC
text = text[:-1]
elif '-' in text or '+' in text:
if '+' in text:
text, plusminus, tz = string.partition(text, '+')
else:
text, plusminus, tz = string.partition(text, '-')
if self._shortTZ and len(tz) == 2:
tz += '00'
if len(tz) != 4:
raise error.PyAsn1Error('malformed time zone offset %s' % tz)
try:
minutes = int(tz[:2]) * 60 + int(tz[2:])
if plusminus == '-':
minutes *= -1
except ValueError:
raise error.PyAsn1Error('unknown time specification %s' % self)
tzinfo = TimeMixIn.FixedOffset(minutes, '?')
else:
tzinfo = None
if '.' in text or ',' in text:
if '.' in text:
text, _, ms = string.partition(text, '.')
else:
text, _, ms = string.partition(text, ',')
try:
ms = int(ms) * 10000
except ValueError:
raise error.PyAsn1Error('bad sub-second time specification %s' % self)
else:
ms = 0
if self._optionalMinutes and len(text) - self._yearsDigits == 6:
text += '0000'
elif len(text) - self._yearsDigits == 8:
text += '00'
try:
dt = dateandtime.strptime(text, self._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
except ValueError:
raise error.PyAsn1Error('malformed datetime format %s' % self)
return dt.replace(microsecond=ms, tzinfo=tzinfo)
@classmethod
def fromDateTime(cls, dt):
"""Create |ASN.1| object from a :py:class:`datetime.datetime` object.
Parameters
----------
dt: :py:class:`datetime.datetime` object
The `datetime.datetime` object to initialize the |ASN.1| object
from
Returns
-------
:
new instance of |ASN.1| value
"""
text = dt.strftime(cls._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
if cls._hasSubsecond:
text += '.%d' % (dt.microsecond // 10000)
if dt.utcoffset():
    # timedelta.seconds alone is never negative; derive the signed
    # total offset first, then render it as +HHMM / -HHMM
    offset = dt.utcoffset()
    seconds = offset.days * 86400 + offset.seconds
    if seconds < 0:
        text += '-'
        seconds = -seconds
    else:
        text += '+'
    text += '%.2d%.2d' % (seconds // 3600, seconds % 3600 // 60)
else:
text += 'Z'
return cls(text)
class GeneralizedTime(char.VisibleString, TimeMixIn):
__doc__ = char.VisibleString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
)
# Optimization for faster codec lookup
typeId = char.VisibleString.getTypeId()
_yearsDigits = 4
_hasSubsecond = True
_optionalMinutes = True
_shortTZ = True
class UTCTime(char.VisibleString, TimeMixIn):
__doc__ = char.VisibleString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
)
# Optimization for faster codec lookup
typeId = char.VisibleString.getTypeId()
_yearsDigits = 2
_hasSubsecond = False
_optionalMinutes = False
_shortTZ = False
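# Editor's sketch (not part of the original module): round-tripping a naive
# datetime through GeneralizedTime with the helpers defined above.
if __name__ == '__main__':
    moment = datetime.datetime(2017, 7, 1, 12, 30, 0)
    generalized = GeneralizedTime.fromDateTime(moment)  # e.g. '20170701123000.0Z'
    assert generalized.asDateTime.replace(tzinfo=None) == moment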
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/type/useful.py
| 0.797714 | 0.219871 |
useful.py
|
pypi
|
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
from pyasn1 import debug
from pyasn1 import error
from pyasn1.type import base
from pyasn1.type import char
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
__all__ = ['encode']
class AbstractItemEncoder(object):
def encode(self, value, encodeFun, **options):
raise error.PyAsn1Error('Not implemented')
class BooleanEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return bool(value)
class IntegerEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return int(value)
class BitStringEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return str(value)
class OctetStringEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return value.asOctets()
class TextStringEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return str(value)
class NullEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return None
class ObjectIdentifierEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return str(value)
class RealEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return float(value)
class SetEncoder(AbstractItemEncoder):
protoDict = dict
def encode(self, value, encodeFun, **options):
value.verifySizeSpec()
namedTypes = value.componentType
substrate = self.protoDict()
for idx, (key, subValue) in enumerate(value.items()):
if namedTypes and namedTypes[idx].isOptional and not value[idx].isValue:
continue
substrate[key] = encodeFun(subValue, **options)
return substrate
class SequenceEncoder(SetEncoder):
protoDict = OrderedDict
class SequenceOfEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
value.verifySizeSpec()
return [encodeFun(x, **options) for x in value]
class ChoiceEncoder(SequenceEncoder):
pass
class AnyEncoder(AbstractItemEncoder):
def encode(self, value, encodeFun, **options):
return value.asOctets()
tagMap = {
univ.Boolean.tagSet: BooleanEncoder(),
univ.Integer.tagSet: IntegerEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Null.tagSet: NullEncoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
univ.Enumerated.tagSet: IntegerEncoder(),
univ.Real.tagSet: RealEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.SequenceOf.tagSet: SequenceOfEncoder(),
univ.SetOf.tagSet: SequenceOfEncoder(),
univ.Choice.tagSet: ChoiceEncoder(),
# character string types
char.UTF8String.tagSet: TextStringEncoder(),
char.NumericString.tagSet: TextStringEncoder(),
char.PrintableString.tagSet: TextStringEncoder(),
char.TeletexString.tagSet: TextStringEncoder(),
char.VideotexString.tagSet: TextStringEncoder(),
char.IA5String.tagSet: TextStringEncoder(),
char.GraphicString.tagSet: TextStringEncoder(),
char.VisibleString.tagSet: TextStringEncoder(),
char.GeneralString.tagSet: TextStringEncoder(),
char.UniversalString.tagSet: TextStringEncoder(),
char.BMPString.tagSet: TextStringEncoder(),
# useful types
useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
useful.GeneralizedTime.tagSet: OctetStringEncoder(),
useful.UTCTime.tagSet: OctetStringEncoder()
}
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SetEncoder(),
univ.SetOf.typeId: SequenceOfEncoder(),
univ.Sequence.typeId: SequenceEncoder(),
univ.SequenceOf.typeId: SequenceOfEncoder(),
univ.Choice.typeId: ChoiceEncoder(),
univ.Any.typeId: AnyEncoder()
}
class Encoder(object):
# noinspection PyDefaultArgument
def __init__(self, tagMap, typeMap={}):
self.__tagMap = tagMap
self.__typeMap = typeMap
def __call__(self, value, **options):
if not isinstance(value, base.Asn1Item):
raise error.PyAsn1Error('value is not valid (should be an instance of an ASN.1 Item)')
if debug.logger & debug.flagEncoder:
logger = debug.logger
else:
logger = None
if logger:
debug.scope.push(type(value).__name__)
logger('encoder called for type %s <%s>' % (type(value).__name__, value.prettyPrint()))
tagSet = value.tagSet
try:
concreteEncoder = self.__typeMap[value.typeId]
except KeyError:
# use base type for codec lookup to recover untagged types
baseTagSet = tag.TagSet(value.tagSet.baseTag, value.tagSet.baseTag)
try:
concreteEncoder = self.__tagMap[baseTagSet]
except KeyError:
raise error.PyAsn1Error('No encoder for %s' % (value,))
if logger:
logger('using value codec %s chosen by %s' % (concreteEncoder.__class__.__name__, tagSet))
pyObject = concreteEncoder.encode(value, self, **options)
if logger:
logger('encoder %s produced: %s' % (type(concreteEncoder).__name__, repr(pyObject)))
debug.scope.pop()
return pyObject
#: Turns ASN.1 object into a Python built-in type object(s).
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a Python built-in type or a tree
#: of those.
#:
#: One exception is that instead of :py:class:`dict`, the :py:class:`OrderedDict`
#: can be produced (whenever available) to preserve ordering of the components
#: in ASN.1 SEQUENCE.
#:
#: Parameters
#: ----------
#: asn1Value: any pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: pyasn1 object to encode (or a tree of them)
#:
#: Returns
#: -------
#: : :py:class:`object`
#: Python built-in type instance (or a tree of them)
#:
#: Raises
#: ------
#: :py:class:`~pyasn1.error.PyAsn1Error`
#: On encoding errors
#:
#: Examples
#: --------
#: Encode ASN.1 value object into native Python types
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> seq.extend([1, 2, 3])
#: >>> encode(seq)
#: [1, 2, 3]
#:
encode = Encoder(tagMap, typeMap)
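# Editor's sketch (not part of the original module): encoding a schema-backed
# Sequence into plain Python built-ins with the encoder defined above.
if __name__ == '__main__':
    from pyasn1.type import namedtype
    class Record(univ.Sequence):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('id', univ.Integer())
        )
    record = Record()
    record['id'] = 123
    assert encode(record) == {'id': 123}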
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/codec/native/encoder.py
| 0.778355 | 0.228522 |
encoder.py
|
pypi
|
from pyasn1 import debug
from pyasn1 import error
from pyasn1.type import base
from pyasn1.type import char
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
__all__ = ['decode']
class AbstractScalarDecoder(object):
def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
return asn1Spec.clone(pyObject)
class BitStringDecoder(AbstractScalarDecoder):
def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
return asn1Spec.clone(univ.BitString.fromBinaryString(pyObject))
class SequenceOrSetDecoder(object):
def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
asn1Value = asn1Spec.clone()
componentsTypes = asn1Spec.componentType
for field in asn1Value:
if field in pyObject:
asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
return asn1Value
class SequenceOfOrSetOfDecoder(object):
def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
asn1Value = asn1Spec.clone()
for pyValue in pyObject:
asn1Value.append(decodeFun(pyValue, asn1Spec.componentType, **options))
return asn1Value
class ChoiceDecoder(object):
def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
asn1Value = asn1Spec.clone()
componentsTypes = asn1Spec.componentType
for field in pyObject:
if field in componentsTypes:
asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
break
return asn1Value
tagMap = {
univ.Integer.tagSet: AbstractScalarDecoder(),
univ.Boolean.tagSet: AbstractScalarDecoder(),
univ.BitString.tagSet: BitStringDecoder(),
univ.OctetString.tagSet: AbstractScalarDecoder(),
univ.Null.tagSet: AbstractScalarDecoder(),
univ.ObjectIdentifier.tagSet: AbstractScalarDecoder(),
univ.Enumerated.tagSet: AbstractScalarDecoder(),
univ.Real.tagSet: AbstractScalarDecoder(),
univ.Sequence.tagSet: SequenceOrSetDecoder(), # conflicts with SequenceOf
univ.Set.tagSet: SequenceOrSetDecoder(), # conflicts with SetOf
univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
# character string types
char.UTF8String.tagSet: AbstractScalarDecoder(),
char.NumericString.tagSet: AbstractScalarDecoder(),
char.PrintableString.tagSet: AbstractScalarDecoder(),
char.TeletexString.tagSet: AbstractScalarDecoder(),
char.VideotexString.tagSet: AbstractScalarDecoder(),
char.IA5String.tagSet: AbstractScalarDecoder(),
char.GraphicString.tagSet: AbstractScalarDecoder(),
char.VisibleString.tagSet: AbstractScalarDecoder(),
char.GeneralString.tagSet: AbstractScalarDecoder(),
char.UniversalString.tagSet: AbstractScalarDecoder(),
char.BMPString.tagSet: AbstractScalarDecoder(),
# useful types
useful.ObjectDescriptor.tagSet: AbstractScalarDecoder(),
useful.GeneralizedTime.tagSet: AbstractScalarDecoder(),
useful.UTCTime.tagSet: AbstractScalarDecoder()
}
# Put in ambiguous & non-ambiguous types for faster codec lookup
typeMap = {
univ.Integer.typeId: AbstractScalarDecoder(),
univ.Boolean.typeId: AbstractScalarDecoder(),
univ.BitString.typeId: BitStringDecoder(),
univ.OctetString.typeId: AbstractScalarDecoder(),
univ.Null.typeId: AbstractScalarDecoder(),
univ.ObjectIdentifier.typeId: AbstractScalarDecoder(),
univ.Enumerated.typeId: AbstractScalarDecoder(),
univ.Real.typeId: AbstractScalarDecoder(),
# ambiguous base types
univ.Set.typeId: SequenceOrSetDecoder(),
univ.SetOf.typeId: SequenceOfOrSetOfDecoder(),
univ.Sequence.typeId: SequenceOrSetDecoder(),
univ.SequenceOf.typeId: SequenceOfOrSetOfDecoder(),
univ.Choice.typeId: ChoiceDecoder(),
univ.Any.typeId: AbstractScalarDecoder(),
# character string types
char.UTF8String.typeId: AbstractScalarDecoder(),
char.NumericString.typeId: AbstractScalarDecoder(),
char.PrintableString.typeId: AbstractScalarDecoder(),
char.TeletexString.typeId: AbstractScalarDecoder(),
char.VideotexString.typeId: AbstractScalarDecoder(),
char.IA5String.typeId: AbstractScalarDecoder(),
char.GraphicString.typeId: AbstractScalarDecoder(),
char.VisibleString.typeId: AbstractScalarDecoder(),
char.GeneralString.typeId: AbstractScalarDecoder(),
char.UniversalString.typeId: AbstractScalarDecoder(),
char.BMPString.typeId: AbstractScalarDecoder(),
# useful types
useful.ObjectDescriptor.typeId: AbstractScalarDecoder(),
useful.GeneralizedTime.typeId: AbstractScalarDecoder(),
useful.UTCTime.typeId: AbstractScalarDecoder()
}
class Decoder(object):
# noinspection PyDefaultArgument
def __init__(self, tagMap, typeMap):
self.__tagMap = tagMap
self.__typeMap = typeMap
def __call__(self, pyObject, asn1Spec, **options):
if debug.logger & debug.flagDecoder:
logger = debug.logger
else:
logger = None
if logger:
debug.scope.push(type(pyObject).__name__)
logger('decoder called at scope %s, working with type %s' % (debug.scope, type(pyObject).__name__))
if asn1Spec is None or not isinstance(asn1Spec, base.Asn1Item):
raise error.PyAsn1Error('asn1Spec is not valid (should be an instance of an ASN.1 Item, not %s)' % asn1Spec.__class__.__name__)
try:
valueDecoder = self.__typeMap[asn1Spec.typeId]
except KeyError:
# use base type for codec lookup to recover untagged types
baseTagSet = tag.TagSet(asn1Spec.tagSet.baseTag, asn1Spec.tagSet.baseTag)
try:
valueDecoder = self.__tagMap[baseTagSet]
except KeyError:
raise error.PyAsn1Error('Unknown ASN.1 tag %s' % asn1Spec.tagSet)
if logger:
logger('calling decoder %s on Python type %s <%s>' % (type(valueDecoder).__name__, type(pyObject).__name__, repr(pyObject)))
value = valueDecoder(pyObject, asn1Spec, self, **options)
if logger:
logger('decoder %s produced ASN.1 type %s <%s>' % (type(valueDecoder).__name__, type(value).__name__, repr(value)))
debug.scope.pop()
return value
#: Turns Python objects of built-in types into ASN.1 objects.
#:
#: Takes Python objects of built-in types and turns them into a tree of
#: ASN.1 objects (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
#: may be a scalar or an arbitrary nested structure.
#:
#: Parameters
#: ----------
#: pyObject: :py:class:`object`
#: A scalar or nested Python objects
#:
#: Keyword Args
#: ------------
#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#: A pyasn1 type object to act as a template guiding the decoder. It is required
#: for successful interpretation of Python objects mapping into their ASN.1
#: representations.
#:
#: Returns
#: -------
#: : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#: A scalar or constructed pyasn1 object
#:
#: Raises
#: ------
#: :py:class:`~pyasn1.error.PyAsn1Error`
#: On decoding errors
#:
#: Examples
#: --------
#: Decode native Python object into ASN.1 objects with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> s = decode([1, 2, 3], asn1Spec=seq)
#: >>> str(s)
#: SequenceOf:
#: 1 2 3
#:
decode = Decoder(tagMap, typeMap)
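# Editor's sketch (not part of the original module): rebuilding an ASN.1
# object from plain Python values with the decoder defined above.
if __name__ == '__main__':
    seq = univ.SequenceOf(componentType=univ.Integer())
    restored = decode([1, 2, 3], asn1Spec=seq)
    assert [int(component) for component in restored] == [1, 2, 3]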
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/codec/native/decoder.py
| 0.724675 | 0.247123 |
decoder.py
|
pypi
|
from pyasn1 import error
from pyasn1.codec.cer import encoder
from pyasn1.type import univ
__all__ = ['encode']
class SetEncoder(encoder.SetEncoder):
@staticmethod
def _componentSortKey(componentAndType):
"""Sort SET components by tag
Sort depending on the actual Choice value (dynamic sort)
"""
component, asn1Spec = componentAndType
if asn1Spec is None:
compType = component
else:
compType = asn1Spec
if compType.typeId == univ.Choice.typeId and not compType.tagSet:
if asn1Spec is None:
return component.getComponent().tagSet
else:
# TODO: move out of sorting key function
names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
if namedType.name in component]
if len(names) != 1:
raise error.PyAsn1Error(
'%s components for Choice at %r' % (len(names) and 'Multiple ' or 'None ', component))
# TODO: support nested CHOICE ordering
return asn1Spec[names[0]].tagSet
else:
return compType.tagSet
tagMap = encoder.tagMap.copy()
tagMap.update({
# Set & SetOf have same tags
univ.Set.tagSet: SetEncoder()
})
typeMap = encoder.typeMap.copy()
typeMap.update({
# Set & SetOf have same tags
univ.Set.typeId: SetEncoder()
})
class Encoder(encoder.Encoder):
fixedDefLengthMode = True
fixedChunkSize = 0
#: Turns ASN.1 object into DER octet stream.
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a DER octet stream.
#:
#: Parameters
#: ----------
#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: A Python or pyasn1 object to encode. If a Python object is given, the
#: `asn1Spec` parameter is required to guide the encoding process.
#:
#: Keyword Args
#: ------------
#: asn1Spec:
#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#:
#: Returns
#: -------
#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: Given ASN.1 object encoded into DER octet stream
#:
#: Raises
#: ------
#: :py:class:`~pyasn1.error.PyAsn1Error`
#: On encoding errors
#:
#: Examples
#: --------
#: Encode Python value into DER with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> encode([1, 2, 3], asn1Spec=seq)
#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
#:
#: Encode ASN.1 value object into DER
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> seq.extend([1, 2, 3])
#: >>> encode(seq)
#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
#:
encode = Encoder(tagMap, typeMap)
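# Hedged usage sketch (not part of the upstream module): DER is fully
# definite-length, so the expected octets below match the docstring example.
if __name__ == '__main__':
    _seq = univ.SequenceOf(componentType=univ.Integer())
    _seq.extend([1, 2, 3])
    assert encode(_seq) == b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'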
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/codec/der/encoder.py
| 0.539711 | 0.320436 |
encoder.py
|
pypi
|
from pyasn1.codec.cer import decoder
from pyasn1.type import univ
__all__ = ['decode']
class BitStringDecoder(decoder.BitStringDecoder):
supportConstructedForm = False
class OctetStringDecoder(decoder.OctetStringDecoder):
supportConstructedForm = False
# TODO: prohibit non-canonical encoding
RealDecoder = decoder.RealDecoder
tagMap = decoder.tagMap.copy()
tagMap.update(
{univ.BitString.tagSet: BitStringDecoder(),
univ.OctetString.tagSet: OctetStringDecoder(),
univ.Real.tagSet: RealDecoder()}
)
typeMap = decoder.typeMap.copy()
# Put in non-ambiguous types for faster codec lookup
for typeDecoder in tagMap.values():
if typeDecoder.protoComponent is not None:
typeId = typeDecoder.protoComponent.__class__.typeId
if typeId is not None and typeId not in typeMap:
typeMap[typeId] = typeDecoder
class Decoder(decoder.Decoder):
supportIndefLength = False
#: Turns DER octet stream into an ASN.1 object.
#:
#: Takes a DER octet stream and decodes it into an ASN.1 object
#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
#: may be a scalar or an arbitrary nested structure.
#:
#: Parameters
#: ----------
#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: DER octet-stream
#:
#: Keyword Args
#: ------------
#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
#: being decoded, *asn1Spec* may or may not be required. The most common reason for
#: it to be required is that the ASN.1 structure is encoded in *IMPLICIT* tagging mode.
#:
#: Returns
#: -------
#: : :py:class:`tuple`
#: A tuple of pyasn1 object recovered from DER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: and the unprocessed trailing portion of the *substrate* (may be empty)
#:
#: Raises
#: ------
#: :py:class:`~pyasn1.error.PyAsn1Error`
#: On decoding errors
#:
#: Examples
#: --------
#: Decode DER serialisation without ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
#: >>> str(s)
#: SequenceOf:
#: 1 2 3
#:
#: Decode DER serialisation with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
#: >>> str(s)
#: SequenceOf:
#: 1 2 3
#:
decode = Decoder(tagMap, typeMap)
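# Hedged usage sketch (not part of the upstream module): `decode` returns
# the recovered object plus any unprocessed trailing substrate.
if __name__ == '__main__':
    _value, _rest = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
    assert [int(_x) for _x in _value] == [1, 2, 3]
    assert not _rest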
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/codec/der/decoder.py
| 0.600071 | 0.236516 |
decoder.py
|
pypi
|
from pyasn1 import error
from pyasn1.codec.ber import encoder
from pyasn1.compat.octets import str2octs, null
from pyasn1.type import univ
from pyasn1.type import useful
__all__ = ['encode']
class BooleanEncoder(encoder.IntegerEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if value == 0:
substrate = (0,)
else:
substrate = (255,)
return substrate, False, False
class RealEncoder(encoder.RealEncoder):
def _chooseEncBase(self, value):
m, b, e = value
return self._dropFloatingPoint(m, b, e)
# specialized GeneralStringEncoder here
class TimeEncoderMixIn(object):
zchar, = str2octs('Z')
pluschar, = str2octs('+')
minuschar, = str2octs('-')
commachar, = str2octs(',')
minLength = 12
maxLength = 19
def encodeValue(self, value, asn1Spec, encodeFun, **options):
# Encoding constraints:
# - minutes are mandatory, seconds are optional
# - subseconds must NOT be zero
# - no hanging fraction dot
# - time in UTC (Z)
# - only dot is allowed for fractions
if asn1Spec is not None:
value = asn1Spec.clone(value)
octets = value.asOctets()
if not self.minLength < len(octets) < self.maxLength:
raise error.PyAsn1Error('Length constraint violated: %r' % value)
if self.pluschar in octets or self.minuschar in octets:
raise error.PyAsn1Error('Must be UTC time: %r' % octets)
if octets[-1] != self.zchar:
raise error.PyAsn1Error('Missing "Z" time zone specifier: %r' % octets)
if self.commachar in octets:
raise error.PyAsn1Error('Comma in fractions disallowed: %r' % value)
options.update(maxChunkSize=1000)
return encoder.OctetStringEncoder.encodeValue(
self, value, asn1Spec, encodeFun, **options
)
class GeneralizedTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
minLength = 12
maxLength = 19
class UTCTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
minLength = 10
maxLength = 14
class SetEncoder(encoder.SequenceEncoder):
@staticmethod
def _componentSortKey(componentAndType):
"""Sort SET components by tag
Sort regardless of the Choice value (static sort)
"""
component, asn1Spec = componentAndType
if asn1Spec is None:
asn1Spec = component
if asn1Spec.typeId == univ.Choice.typeId and not asn1Spec.tagSet:
if asn1Spec.tagSet:
return asn1Spec.tagSet
else:
return asn1Spec.componentType.minTagSet
else:
return asn1Spec.tagSet
def encodeValue(self, value, asn1Spec, encodeFun, **options):
substrate = null
comps = []
compsMap = {}
if asn1Spec is None:
# instance of ASN.1 schema
value.verifySizeSpec()
namedTypes = value.componentType
for idx, component in enumerate(value.values()):
if namedTypes:
namedType = namedTypes[idx]
if namedType.isOptional and not component.isValue:
continue
if namedType.isDefaulted and component == namedType.asn1Object:
continue
compsMap[id(component)] = namedType
else:
compsMap[id(component)] = None
comps.append((component, asn1Spec))
else:
# bare Python value + ASN.1 schema
for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):
try:
component = value[namedType.name]
except KeyError:
raise error.PyAsn1Error('Component name "%s" not found in %r' % (namedType.name, value))
if namedType.isOptional and namedType.name not in value:
continue
if namedType.isDefaulted and component == namedType.asn1Object:
continue
compsMap[id(component)] = namedType
comps.append((component, asn1Spec[idx]))
for comp, compType in sorted(comps, key=self._componentSortKey):
namedType = compsMap[id(comp)]
if namedType:
options.update(ifNotEmpty=namedType.isOptional)
chunk = encodeFun(comp, compType, **options)
# wrap open type blob if needed
if namedType and namedType.openType:
wrapType = namedType.asn1Object
if wrapType.tagSet and not wrapType.isSameTypeWith(comp):
chunk = encodeFun(chunk, wrapType, **options)
substrate += chunk
return substrate, True, True
class SetOfEncoder(encoder.SequenceOfEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is None:
value.verifySizeSpec()
else:
asn1Spec = asn1Spec.componentType
components = [encodeFun(x, asn1Spec, **options)
for x in value]
# sort by serialised and padded components
if len(components) > 1:
zero = str2octs('\x00')
maxLen = max(map(len, components))
paddedComponents = [
(x.ljust(maxLen, zero), x) for x in components
]
paddedComponents.sort(key=lambda x: x[0])
components = [x[1] for x in paddedComponents]
substrate = null.join(components)
return substrate, True, True
class SequenceEncoder(encoder.SequenceEncoder):
omitEmptyOptionals = True
class SequenceOfEncoder(encoder.SequenceOfEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if options.get('ifNotEmpty', False) and not len(value):
return null, True, True
if asn1Spec is None:
value.verifySizeSpec()
else:
asn1Spec = asn1Spec.componentType
substrate = null
for idx, component in enumerate(value):
substrate += encodeFun(value[idx], asn1Spec, **options)
return substrate, True, True
tagMap = encoder.tagMap.copy()
tagMap.update({
univ.Boolean.tagSet: BooleanEncoder(),
univ.Real.tagSet: RealEncoder(),
useful.GeneralizedTime.tagSet: GeneralizedTimeEncoder(),
useful.UTCTime.tagSet: UTCTimeEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.SetOf.tagSet: SetOfEncoder(),
univ.Sequence.typeId: SequenceEncoder()
})
typeMap = encoder.typeMap.copy()
typeMap.update({
univ.Boolean.typeId: BooleanEncoder(),
univ.Real.typeId: RealEncoder(),
useful.GeneralizedTime.typeId: GeneralizedTimeEncoder(),
useful.UTCTime.typeId: UTCTimeEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.Set.typeId: SetEncoder(),
univ.SetOf.typeId: SetOfEncoder(),
univ.Sequence.typeId: SequenceEncoder(),
univ.SequenceOf.typeId: SequenceOfEncoder()
})
class Encoder(encoder.Encoder):
fixedDefLengthMode = False
fixedChunkSize = 1000
#: Turns ASN.1 object into CER octet stream.
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a CER octet stream.
#:
#: Parameters
#: ----------
#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: A Python or pyasn1 object to encode. If a Python object is given, the
#: `asn1Spec` parameter is required to guide the encoding process.
#:
#: Keyword Args
#: ------------
#: asn1Spec:
#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#:
#: Returns
#: -------
#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: Given ASN.1 object encoded into CER octet stream
#:
#: Raises
#: ------
#: :py:class:`~pyasn1.error.PyAsn1Error`
#: On encoding errors
#:
#: Examples
#: --------
#: Encode Python value into CER with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> encode([1, 2, 3], asn1Spec=seq)
#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
#:
#: Encode ASN.1 value object into CER
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> seq.extend([1, 2, 3])
#: >>> encode(seq)
#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
#:
encode = Encoder(tagMap, typeMap)
# EncoderFactory queries class instance and builds a map of tags -> encoders
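# Hedged usage sketch (not part of the upstream module): CER emits
# constructed values in indefinite-length form, hence the 0x80 length octet
# and the trailing end-of-octets pair (matches the docstring example).
if __name__ == '__main__':
    _seq = univ.SequenceOf(componentType=univ.Integer())
    _seq.extend([1, 2, 3])
    assert encode(_seq) == b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'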
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/codec/cer/encoder.py
| 0.772101 | 0.264097 |
encoder.py
|
pypi
|
from pyasn1 import debug
from pyasn1 import error
from pyasn1.codec.ber import eoo
from pyasn1.compat.integer import to_bytes
from pyasn1.compat.octets import (int2oct, oct2int, ints2octs, null,
str2octs, isOctetsType)
from pyasn1.type import char
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
__all__ = ['encode']
class AbstractItemEncoder(object):
supportIndefLenMode = True
    # The encoding produced by an otherwise legitimate call `encodeFun(eoo.endOfOctets)`
eooIntegerSubstrate = (0, 0)
eooOctetsSubstrate = ints2octs(eooIntegerSubstrate)
# noinspection PyMethodMayBeStatic
def encodeTag(self, singleTag, isConstructed):
tagClass, tagFormat, tagId = singleTag
encodedTag = tagClass | tagFormat
if isConstructed:
encodedTag |= tag.tagFormatConstructed
if tagId < 31:
return encodedTag | tagId,
else:
substrate = tagId & 0x7f,
tagId >>= 7
while tagId:
substrate = (0x80 | (tagId & 0x7f),) + substrate
tagId >>= 7
return (encodedTag | 0x1F,) + substrate
def encodeLength(self, length, defMode):
if not defMode and self.supportIndefLenMode:
return (0x80,)
if length < 0x80:
return length,
else:
substrate = ()
while length:
substrate = (length & 0xff,) + substrate
length >>= 8
substrateLen = len(substrate)
if substrateLen > 126:
raise error.PyAsn1Error('Length octets overflow (%d)' % substrateLen)
return (0x80 | substrateLen,) + substrate
def encodeValue(self, value, asn1Spec, encodeFun, **options):
raise error.PyAsn1Error('Not implemented')
def encode(self, value, asn1Spec=None, encodeFun=None, **options):
if asn1Spec is None:
tagSet = value.tagSet
else:
tagSet = asn1Spec.tagSet
# untagged item?
if not tagSet:
substrate, isConstructed, isOctets = self.encodeValue(
value, asn1Spec, encodeFun, **options
)
return substrate
defMode = options.get('defMode', True)
for idx, singleTag in enumerate(tagSet.superTags):
defModeOverride = defMode
# base tag?
if not idx:
substrate, isConstructed, isOctets = self.encodeValue(
value, asn1Spec, encodeFun, **options
)
if not substrate and isConstructed and options.get('ifNotEmpty', False):
return substrate
# primitive form implies definite mode
if not isConstructed:
defModeOverride = True
header = self.encodeTag(singleTag, isConstructed)
header += self.encodeLength(len(substrate), defModeOverride)
if isOctets:
substrate = ints2octs(header) + substrate
if not defModeOverride:
substrate += self.eooOctetsSubstrate
else:
substrate = header + substrate
if not defModeOverride:
substrate += self.eooIntegerSubstrate
if not isOctets:
substrate = ints2octs(substrate)
return substrate
class EndOfOctetsEncoder(AbstractItemEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
return null, False, True
class BooleanEncoder(AbstractItemEncoder):
supportIndefLenMode = False
def encodeValue(self, value, asn1Spec, encodeFun, **options):
return value and (1,) or (0,), False, False
class IntegerEncoder(AbstractItemEncoder):
supportIndefLenMode = False
supportCompactZero = False
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if value == 0:
# de-facto way to encode zero
if self.supportCompactZero:
return (), False, False
else:
return (0,), False, False
return to_bytes(int(value), signed=True), False, True
class BitStringEncoder(AbstractItemEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is not None:
# TODO: try to avoid ASN.1 schema instantiation
value = asn1Spec.clone(value)
valueLength = len(value)
if valueLength % 8:
alignedValue = value << (8 - valueLength % 8)
else:
alignedValue = value
maxChunkSize = options.get('maxChunkSize', 0)
if not maxChunkSize or len(alignedValue) <= maxChunkSize * 8:
substrate = alignedValue.asOctets()
return int2oct(len(substrate) * 8 - valueLength) + substrate, False, True
baseTag = value.tagSet.baseTag
# strip off explicit tags
if baseTag:
tagSet = tag.TagSet(baseTag, baseTag)
else:
tagSet = tag.TagSet()
alignedValue = alignedValue.clone(tagSet=tagSet)
stop = 0
substrate = null
while stop < valueLength:
start = stop
stop = min(start + maxChunkSize * 8, valueLength)
substrate += encodeFun(alignedValue[start:stop], asn1Spec, **options)
return substrate, True, True
class OctetStringEncoder(AbstractItemEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is None:
substrate = value.asOctets()
elif not isOctetsType(value):
substrate = asn1Spec.clone(value).asOctets()
else:
substrate = value
maxChunkSize = options.get('maxChunkSize', 0)
if not maxChunkSize or len(substrate) <= maxChunkSize:
return substrate, False, True
else:
# strip off explicit tags for inner chunks
if asn1Spec is None:
baseTag = value.tagSet.baseTag
# strip off explicit tags
if baseTag:
tagSet = tag.TagSet(baseTag, baseTag)
else:
tagSet = tag.TagSet()
asn1Spec = value.clone(tagSet=tagSet)
elif not isOctetsType(value):
baseTag = asn1Spec.tagSet.baseTag
# strip off explicit tags
if baseTag:
tagSet = tag.TagSet(baseTag, baseTag)
else:
tagSet = tag.TagSet()
asn1Spec = asn1Spec.clone(tagSet=tagSet)
pos = 0
substrate = null
while True:
chunk = value[pos:pos + maxChunkSize]
if not chunk:
break
substrate += encodeFun(chunk, asn1Spec, **options)
pos += maxChunkSize
return substrate, True, True
class NullEncoder(AbstractItemEncoder):
supportIndefLenMode = False
def encodeValue(self, value, asn1Spec, encodeFun, **options):
return null, False, True
class ObjectIdentifierEncoder(AbstractItemEncoder):
supportIndefLenMode = False
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is not None:
value = asn1Spec.clone(value)
oid = value.asTuple()
# Build the first pair
try:
first = oid[0]
second = oid[1]
except IndexError:
raise error.PyAsn1Error('Short OID %s' % (value,))
if 0 <= second <= 39:
if first == 1:
oid = (second + 40,) + oid[2:]
elif first == 0:
oid = (second,) + oid[2:]
elif first == 2:
oid = (second + 80,) + oid[2:]
else:
raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))
elif first == 2:
oid = (second + 80,) + oid[2:]
else:
raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))
octets = ()
# Cycle through subIds
for subOid in oid:
if 0 <= subOid <= 127:
# Optimize for the common case
octets += (subOid,)
elif subOid > 127:
# Pack large Sub-Object IDs
res = (subOid & 0x7f,)
subOid >>= 7
while subOid:
res = (0x80 | (subOid & 0x7f),) + res
subOid >>= 7
# Add packed Sub-Object ID to resulted Object ID
octets += res
else:
raise error.PyAsn1Error('Negative OID arc %s at %s' % (subOid, value))
return octets, False, False
class RealEncoder(AbstractItemEncoder):
supportIndefLenMode = 0
binEncBase = 2 # set to None to choose encoding base automatically
@staticmethod
def _dropFloatingPoint(m, encbase, e):
ms, es = 1, 1
if m < 0:
ms = -1 # mantissa sign
if e < 0:
            es = -1  # exponent sign
m *= ms
if encbase == 8:
m *= 2 ** (abs(e) % 3 * es)
e = abs(e) // 3 * es
elif encbase == 16:
m *= 2 ** (abs(e) % 4 * es)
e = abs(e) // 4 * es
while True:
if int(m) != m:
m *= encbase
e -= 1
continue
break
return ms, int(m), encbase, e
def _chooseEncBase(self, value):
m, b, e = value
encBase = [2, 8, 16]
if value.binEncBase in encBase:
return self._dropFloatingPoint(m, value.binEncBase, e)
elif self.binEncBase in encBase:
return self._dropFloatingPoint(m, self.binEncBase, e)
# auto choosing base 2/8/16
mantissa = [m, m, m]
exponenta = [e, e, e]
sign = 1
encbase = 2
e = float('inf')
for i in range(3):
(sign,
mantissa[i],
encBase[i],
exponenta[i]) = self._dropFloatingPoint(mantissa[i], encBase[i], exponenta[i])
if abs(exponenta[i]) < abs(e) or (abs(exponenta[i]) == abs(e) and mantissa[i] < m):
e = exponenta[i]
m = int(mantissa[i])
encbase = encBase[i]
return sign, m, encbase, e
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is not None:
value = asn1Spec.clone(value)
if value.isPlusInf:
return (0x40,), False, False
if value.isMinusInf:
return (0x41,), False, False
m, b, e = value
if not m:
return null, False, True
if b == 10:
return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), False, True
elif b == 2:
fo = 0x80 # binary encoding
ms, m, encbase, e = self._chooseEncBase(value)
if ms < 0: # mantissa sign
fo |= 0x40 # sign bit
            # exponent & mantissa normalization
if encbase == 2:
while m & 0x1 == 0:
m >>= 1
e += 1
elif encbase == 8:
while m & 0x7 == 0:
m >>= 3
e += 1
fo |= 0x10
else: # encbase = 16
while m & 0xf == 0:
m >>= 4
e += 1
fo |= 0x20
sf = 0 # scale factor
while m & 0x1 == 0:
m >>= 1
sf += 1
if sf > 3:
raise error.PyAsn1Error('Scale factor overflow') # bug if raised
fo |= sf << 2
eo = null
if e == 0 or e == -1:
eo = int2oct(e & 0xff)
else:
while e not in (0, -1):
eo = int2oct(e & 0xff) + eo
e >>= 8
if e == 0 and eo and oct2int(eo[0]) & 0x80:
eo = int2oct(0) + eo
if e == -1 and eo and not (oct2int(eo[0]) & 0x80):
eo = int2oct(0xff) + eo
n = len(eo)
if n > 0xff:
raise error.PyAsn1Error('Real exponent overflow')
if n == 1:
pass
elif n == 2:
fo |= 1
elif n == 3:
fo |= 2
else:
fo |= 3
eo = int2oct(n & 0xff) + eo
po = null
while m:
po = int2oct(m & 0xff) + po
m >>= 8
substrate = int2oct(fo) + eo + po
return substrate, False, True
else:
raise error.PyAsn1Error('Prohibited Real base %s' % b)
class SequenceEncoder(AbstractItemEncoder):
omitEmptyOptionals = False
# TODO: handling three flavors of input is too much -- split over codecs
def encodeValue(self, value, asn1Spec, encodeFun, **options):
substrate = null
if asn1Spec is None:
# instance of ASN.1 schema
value.verifySizeSpec()
namedTypes = value.componentType
for idx, component in enumerate(value.values()):
if namedTypes:
namedType = namedTypes[idx]
if namedType.isOptional and not component.isValue:
continue
if namedType.isDefaulted and component == namedType.asn1Object:
continue
if self.omitEmptyOptionals:
options.update(ifNotEmpty=namedType.isOptional)
chunk = encodeFun(component, asn1Spec, **options)
# wrap open type blob if needed
if namedTypes and namedType.openType:
wrapType = namedType.asn1Object
if wrapType.tagSet and not wrapType.isSameTypeWith(component):
chunk = encodeFun(chunk, wrapType, **options)
substrate += chunk
else:
# bare Python value + ASN.1 schema
for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):
try:
component = value[namedType.name]
except KeyError:
raise error.PyAsn1Error('Component name "%s" not found in %r' % (namedType.name, value))
if namedType.isOptional and namedType.name not in value:
continue
if namedType.isDefaulted and component == namedType.asn1Object:
continue
if self.omitEmptyOptionals:
options.update(ifNotEmpty=namedType.isOptional)
chunk = encodeFun(component, asn1Spec[idx], **options)
# wrap open type blob if needed
if namedType.openType:
wrapType = namedType.asn1Object
if wrapType.tagSet and not wrapType.isSameTypeWith(component):
chunk = encodeFun(chunk, wrapType, **options)
substrate += chunk
return substrate, True, True
class SequenceOfEncoder(AbstractItemEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is None:
value.verifySizeSpec()
else:
asn1Spec = asn1Spec.componentType
substrate = null
for idx, component in enumerate(value):
substrate += encodeFun(value[idx], asn1Spec, **options)
return substrate, True, True
class ChoiceEncoder(AbstractItemEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is None:
component = value.getComponent()
else:
names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
if namedType.name in value]
if len(names) != 1:
raise error.PyAsn1Error('%s components for Choice at %r' % (len(names) and 'Multiple ' or 'None ', value))
name = names[0]
component = value[name]
asn1Spec = asn1Spec[name]
return encodeFun(component, asn1Spec, **options), True, True
class AnyEncoder(OctetStringEncoder):
def encodeValue(self, value, asn1Spec, encodeFun, **options):
if asn1Spec is None:
value = value.asOctets()
elif not isOctetsType(value):
value = asn1Spec.clone(value).asOctets()
return value, not options.get('defMode', True), True
tagMap = {
eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
univ.Boolean.tagSet: BooleanEncoder(),
univ.Integer.tagSet: IntegerEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Null.tagSet: NullEncoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
univ.Enumerated.tagSet: IntegerEncoder(),
univ.Real.tagSet: RealEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.SequenceOf.tagSet: SequenceOfEncoder(),
univ.SetOf.tagSet: SequenceOfEncoder(),
univ.Choice.tagSet: ChoiceEncoder(),
# character string types
char.UTF8String.tagSet: OctetStringEncoder(),
char.NumericString.tagSet: OctetStringEncoder(),
char.PrintableString.tagSet: OctetStringEncoder(),
char.TeletexString.tagSet: OctetStringEncoder(),
char.VideotexString.tagSet: OctetStringEncoder(),
char.IA5String.tagSet: OctetStringEncoder(),
char.GraphicString.tagSet: OctetStringEncoder(),
char.VisibleString.tagSet: OctetStringEncoder(),
char.GeneralString.tagSet: OctetStringEncoder(),
char.UniversalString.tagSet: OctetStringEncoder(),
char.BMPString.tagSet: OctetStringEncoder(),
# useful types
useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
useful.GeneralizedTime.tagSet: OctetStringEncoder(),
useful.UTCTime.tagSet: OctetStringEncoder()
}
# Put in ambiguous & non-ambiguous types for faster codec lookup
typeMap = {
univ.Boolean.typeId: BooleanEncoder(),
univ.Integer.typeId: IntegerEncoder(),
univ.BitString.typeId: BitStringEncoder(),
univ.OctetString.typeId: OctetStringEncoder(),
univ.Null.typeId: NullEncoder(),
univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
univ.Enumerated.typeId: IntegerEncoder(),
univ.Real.typeId: RealEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.Set.typeId: SequenceEncoder(),
univ.SetOf.typeId: SequenceOfEncoder(),
univ.Sequence.typeId: SequenceEncoder(),
univ.SequenceOf.typeId: SequenceOfEncoder(),
univ.Choice.typeId: ChoiceEncoder(),
univ.Any.typeId: AnyEncoder(),
# character string types
char.UTF8String.typeId: OctetStringEncoder(),
char.NumericString.typeId: OctetStringEncoder(),
char.PrintableString.typeId: OctetStringEncoder(),
char.TeletexString.typeId: OctetStringEncoder(),
char.VideotexString.typeId: OctetStringEncoder(),
char.IA5String.typeId: OctetStringEncoder(),
char.GraphicString.typeId: OctetStringEncoder(),
char.VisibleString.typeId: OctetStringEncoder(),
char.GeneralString.typeId: OctetStringEncoder(),
char.UniversalString.typeId: OctetStringEncoder(),
char.BMPString.typeId: OctetStringEncoder(),
# useful types
useful.ObjectDescriptor.typeId: OctetStringEncoder(),
useful.GeneralizedTime.typeId: OctetStringEncoder(),
useful.UTCTime.typeId: OctetStringEncoder()
}
class Encoder(object):
fixedDefLengthMode = None
fixedChunkSize = None
# noinspection PyDefaultArgument
def __init__(self, tagMap, typeMap={}):
self.__tagMap = tagMap
self.__typeMap = typeMap
def __call__(self, value, asn1Spec=None, **options):
try:
if asn1Spec is None:
typeId = value.typeId
else:
typeId = asn1Spec.typeId
except AttributeError:
raise error.PyAsn1Error('Value %r is not ASN.1 type instance '
'and "asn1Spec" not given' % (value,))
if debug.logger & debug.flagEncoder:
logger = debug.logger
else:
logger = None
if logger:
logger('encoder called in %sdef mode, chunk size %s for '
'type %s, value:\n%s' % (not options.get('defMode', True) and 'in' or '', options.get('maxChunkSize', 0), asn1Spec is None and value.prettyPrintType() or asn1Spec.prettyPrintType(), value))
if self.fixedDefLengthMode is not None:
options.update(defMode=self.fixedDefLengthMode)
if self.fixedChunkSize is not None:
options.update(maxChunkSize=self.fixedChunkSize)
try:
concreteEncoder = self.__typeMap[typeId]
if logger:
logger('using value codec %s chosen by type ID %s' % (concreteEncoder.__class__.__name__, typeId))
except KeyError:
if asn1Spec is None:
tagSet = value.tagSet
else:
tagSet = asn1Spec.tagSet
# use base type for codec lookup to recover untagged types
baseTagSet = tag.TagSet(tagSet.baseTag, tagSet.baseTag)
try:
concreteEncoder = self.__tagMap[baseTagSet]
except KeyError:
raise error.PyAsn1Error('No encoder for %r (%s)' % (value, tagSet))
if logger:
logger('using value codec %s chosen by tagSet %s' % (concreteEncoder.__class__.__name__, tagSet))
substrate = concreteEncoder.encode(value, asn1Spec, self, **options)
if logger:
logger('codec %s built %s octets of substrate: %s\nencoder completed' % (concreteEncoder, len(substrate), debug.hexdump(substrate)))
return substrate
#: Turns ASN.1 object into BER octet stream.
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a BER octet stream.
#:
#: Parameters
#: ----------
#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: A Python or pyasn1 object to encode. If a Python object is given, the
#: `asn1Spec` parameter is required to guide the encoding process.
#:
#: Keyword Args
#: ------------
#: asn1Spec:
#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#:
#: defMode: :py:class:`bool`
#: If `False`, produces indefinite length encoding
#:
#: maxChunkSize: :py:class:`int`
#: Maximum chunk size in chunked encoding mode (0 denotes unlimited chunk size)
#:
#: Returns
#: -------
#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: Given ASN.1 object encoded into BER octet stream
#:
#: Raises
#: ------
#: :py:class:`~pyasn1.error.PyAsn1Error`
#: On encoding errors
#:
#: Examples
#: --------
#: Encode Python value into BER with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> encode([1, 2, 3], asn1Spec=seq)
#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
#:
#: Encode ASN.1 value object into BER
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> seq.extend([1, 2, 3])
#: >>> encode(seq)
#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
#:
encode = Encoder(tagMap, typeMap)
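# Hedged usage sketch (not part of the upstream module): exercising the
# documented `defMode` option; indefinite-length encodings of constructed
# values are terminated by the two-octet end-of-octets marker.
if __name__ == '__main__':
    _seq = univ.SequenceOf(componentType=univ.Integer())
    _seq.extend([1, 2, 3])
    _substrate = encode(_seq, defMode=False)
    assert _substrate.endswith(ints2octs((0, 0)))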
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1/pyasn1/codec/ber/encoder.py
| 0.430866 | 0.202266 |
encoder.py
|
pypi
|
.. funcsigs documentation master file, created by
sphinx-quickstart on Fri Apr 20 20:27:52 2012.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Introducing funcsigs
====================
The Funcsigs Package
--------------------
``funcsigs`` is a backport of the `PEP 362`_ function signature features from
Python 3.3's `inspect`_ module. The backport is compatible with Python 2.6, 2.7
as well as 3.3 and up. 3.2 was supported by version 0.4, but with setuptools and
pip no longer supporting 3.2, we cannot make any statement about 3.2
compatibility.
Compatibility
`````````````
The ``funcsigs`` backport has been tested against:
* CPython 2.6
* CPython 2.7
* CPython 3.3
* CPython 3.4
* CPython 3.5
* CPython nightlies
* PyPy and PyPy3 (currently failing CI)
Continuous integration testing is provided by `Travis CI`_.
Under Python 2.x there is a compatibility issue when a function is assigned to
the ``__wrapped__`` property of a class after it has been constructed.
Similarly, under PyPy, directly passing the ``__call__`` method of a builtin is
also a compatibility issue. Otherwise the functionality is believed to be
uniform between Python 2 and Python 3.
Issues
``````
Source code for ``funcsigs`` is hosted on `GitHub`_. Any bug reports or feature
requests can be made using GitHub's `issues system`_. |build_status| |coverage|
Example
-------
To obtain a `Signature` object, pass the target function to the
``funcsigs.signature`` function.
.. code-block:: python
>>> from funcsigs import signature
>>> def foo(a, b=None, *args, **kwargs):
... pass
...
>>> sig = signature(foo)
>>> sig
<funcsigs.Signature object at 0x...>
>>> sig.parameters
OrderedDict([('a', <Parameter at 0x... 'a'>), ('b', <Parameter at 0x... 'b'>), ('args', <Parameter at 0x... 'args'>), ('kwargs', <Parameter at 0x... 'kwargs'>)])
>>> sig.return_annotation
<class 'funcsigs._empty'>
Introspecting callables with the Signature object
-------------------------------------------------
.. note::
This section of documentation is a direct reproduction of the Python
standard library documentation for the inspect module.
The Signature object represents the call signature of a callable object and its
return annotation. To retrieve a Signature object, use the :func:`signature`
function.
.. function:: signature(callable)
Return a :class:`Signature` object for the given ``callable``::
>>> from funcsigs import signature
>>> def foo(a, *, b:int, **kwargs):
... pass
>>> sig = signature(foo)
>>> str(sig)
'(a, *, b:int, **kwargs)'
>>> str(sig.parameters['b'])
'b:int'
>>> sig.parameters['b'].annotation
<class 'int'>
Accepts a wide range of python callables, from plain functions and classes to
:func:`functools.partial` objects.
.. note::
Some callables may not be introspectable in certain implementations of
Python. For example, in CPython, built-in functions defined in C provide
no metadata about their arguments.
.. class:: Signature
A Signature object represents the call signature of a function and its return
annotation. For each parameter accepted by the function it stores a
:class:`Parameter` object in its :attr:`parameters` collection.
Signature objects are *immutable*. Use :meth:`Signature.replace` to make a
modified copy.
.. attribute:: Signature.empty
A special class-level marker to specify absence of a return annotation.
.. attribute:: Signature.parameters
An ordered mapping of parameters' names to the corresponding
:class:`Parameter` objects.
.. attribute:: Signature.return_annotation
The "return" annotation for the callable. If the callable has no "return"
annotation, this attribute is set to :attr:`Signature.empty`.
.. method:: Signature.bind(*args, **kwargs)
Create a mapping from positional and keyword arguments to parameters.
Returns :class:`BoundArguments` if ``*args`` and ``**kwargs`` match the
signature, or raises a :exc:`TypeError`.
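    For example (a minimal sketch; the function ``f`` is arbitrary)::

        >>> def f(a, b=1):
        ...     pass
        >>> ba = signature(f).bind(2, b=3)
        >>> ba.arguments
        OrderedDict([('a', 2), ('b', 3)])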
.. method:: Signature.bind_partial(*args, **kwargs)
Works the same way as :meth:`Signature.bind`, but allows the omission of
some required arguments (mimics :func:`functools.partial` behavior.)
Returns :class:`BoundArguments`, or raises a :exc:`TypeError` if the
passed arguments do not match the signature.
.. method:: Signature.replace(*[, parameters][, return_annotation])
Create a new Signature instance based on the instance replace was invoked
on. It is possible to pass different ``parameters`` and/or
``return_annotation`` to override the corresponding properties of the base
signature. To remove return_annotation from the copied Signature, pass in
:attr:`Signature.empty`.
::
>>> def test(a, b):
... pass
>>> sig = signature(test)
>>> new_sig = sig.replace(return_annotation="new return anno")
>>> str(new_sig)
"(a, b) -> 'new return anno'"
.. class:: Parameter
Parameter objects are *immutable*. Instead of modifying a Parameter object,
you can use :meth:`Parameter.replace` to create a modified copy.
.. attribute:: Parameter.empty
A special class-level marker to specify absence of default values and
annotations.
.. attribute:: Parameter.name
The name of the parameter as a string. Must be a valid python identifier
name (with the exception of ``POSITIONAL_ONLY`` parameters, which can have
it set to ``None``).
.. attribute:: Parameter.default
The default value for the parameter. If the parameter has no default
value, this attribute is set to :attr:`Parameter.empty`.
.. attribute:: Parameter.annotation
The annotation for the parameter. If the parameter has no annotation,
this attribute is set to :attr:`Parameter.empty`.
.. attribute:: Parameter.kind
Describes how argument values are bound to the parameter. Possible values
(accessible via :class:`Parameter`, like ``Parameter.KEYWORD_ONLY``):
+------------------------+----------------------------------------------+
| Name | Meaning |
+========================+==============================================+
| *POSITIONAL_ONLY* | Value must be supplied as a positional |
| | argument. |
| | |
| | Python has no explicit syntax for defining |
| | positional-only parameters, but many built-in|
| | and extension module functions (especially |
| | those that accept only one or two parameters)|
| | accept them. |
+------------------------+----------------------------------------------+
| *POSITIONAL_OR_KEYWORD*| Value may be supplied as either a keyword or |
| | positional argument (this is the standard |
| | binding behaviour for functions implemented |
| | in Python.) |
+------------------------+----------------------------------------------+
| *VAR_POSITIONAL* | A tuple of positional arguments that aren't |
| | bound to any other parameter. This |
| | corresponds to a ``*args`` parameter in a |
| | Python function definition. |
+------------------------+----------------------------------------------+
| *KEYWORD_ONLY* | Value must be supplied as a keyword argument.|
| | Keyword only parameters are those which |
| | appear after a ``*`` or ``*args`` entry in a |
| | Python function definition. |
+------------------------+----------------------------------------------+
| *VAR_KEYWORD* | A dict of keyword arguments that aren't bound|
| | to any other parameter. This corresponds to a|
| | ``**kwargs`` parameter in a Python function |
| | definition. |
+------------------------+----------------------------------------------+
Example: print all keyword-only arguments without default values::
>>> def foo(a, b, *, c, d=10):
... pass
>>> sig = signature(foo)
>>> for param in sig.parameters.values():
... if (param.kind == param.KEYWORD_ONLY and
... param.default is param.empty):
... print('Parameter:', param)
Parameter: c
.. method:: Parameter.replace(*[, name][, kind][, default][, annotation])
    Create a new Parameter instance based on the instance :meth:`replace` was
    invoked on. To override a :class:`Parameter` attribute, pass the
    corresponding argument. To remove a default value and/or an annotation
    from a Parameter, pass :attr:`Parameter.empty`.
::
>>> from funcsigs import Parameter
>>> param = Parameter('foo', Parameter.KEYWORD_ONLY, default=42)
>>> str(param)
'foo=42'
>>> str(param.replace()) # Will create a shallow copy of 'param'
'foo=42'
>>> str(param.replace(default=Parameter.empty, annotation='spam'))
"foo:'spam'"
.. class:: BoundArguments
Result of a :meth:`Signature.bind` or :meth:`Signature.bind_partial` call.
Holds the mapping of arguments to the function's parameters.
.. attribute:: BoundArguments.arguments
An ordered, mutable mapping (:class:`collections.OrderedDict`) of
parameters' names to arguments' values. Contains only explicitly bound
arguments. Changes in :attr:`arguments` will reflect in :attr:`args` and
:attr:`kwargs`.
Should be used in conjunction with :attr:`Signature.parameters` for any
argument processing purposes.
.. note::
Arguments for which :meth:`Signature.bind` or
:meth:`Signature.bind_partial` relied on a default value are skipped.
However, if needed, it is easy to include them.
::
>>> def foo(a, b=10):
... pass
>>> sig = signature(foo)
>>> ba = sig.bind(5)
>>> ba.args, ba.kwargs
((5,), {})
>>> for param in sig.parameters.values():
... if param.name not in ba.arguments:
... ba.arguments[param.name] = param.default
>>> ba.args, ba.kwargs
((5, 10), {})
.. attribute:: BoundArguments.args
A tuple of positional arguments values. Dynamically computed from the
:attr:`arguments` attribute.
.. attribute:: BoundArguments.kwargs
A dict of keyword arguments values. Dynamically computed from the
:attr:`arguments` attribute.
The :attr:`args` and :attr:`kwargs` properties can be used to invoke
functions::
def test(a, *, b):
...
sig = signature(test)
ba = sig.bind(10, b=20)
test(*ba.args, **ba.kwargs)
.. seealso::
:pep:`362` - Function Signature Object.
The detailed specification, implementation details and examples.
Copyright
---------
*funcsigs* is a derived work of CPython under the terms of the `PSF License
Agreement`_. The original CPython inspect module, its unit tests and
documentation are the copyright of the Python Software Foundation. The derived
work is distributed under the `Apache License Version 2.0`_.
.. _PSF License Agreement: http://docs.python.org/3/license.html#terms-and-conditions-for-accessing-or-otherwise-using-python
.. _Apache License Version 2.0: http://opensource.org/licenses/Apache-2.0
.. _GitHub: https://github.com/testing-cabal/funcsigs
.. _Travis CI: http://travis-ci.org/
.. _Read The Docs: http://funcsigs.readthedocs.org/
.. _PEP 362: http://www.python.org/dev/peps/pep-0362/
.. _inspect: http://docs.python.org/3/library/inspect.html#introspecting-callables-with-the-signature-object
.. _issues system: https://github.com/testing-cabal/funcsigs/issues
.. |build_status| image:: https://secure.travis-ci.org/aliles/funcsigs.png?branch=master
:target: http://travis-ci.org/#!/aliles/funcsigs
:alt: Current build status
.. |coverage| image:: https://coveralls.io/repos/aliles/funcsigs/badge.png?branch=master
:target: https://coveralls.io/r/aliles/funcsigs?branch=master
:alt: Coverage status
.. |pypi_version| image:: https://pypip.in/v/funcsigs/badge.png
:target: https://crate.io/packages/funcsigs/
:alt: Latest PyPI version
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/funcsigs/README.rst
| 0.797754 | 0.665399 |
README.rst
|
pypi
|
.. funcsigs documentation master file, created by
sphinx-quickstart on Fri Apr 20 20:27:52 2012.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Introducing funcsigs
====================
The Funcsigs Package
--------------------
``funcsigs`` is a backport of the `PEP 362`_ function signature features from
Python 3.3's `inspect`_ module. The backport is compatible with Python 2.6, 2.7
as well as 3.3 and up. 3.2 was supported by version 0.4, but with setuptools and
pip no longer supporting 3.2, we cannot make any statement about 3.2
compatibility.
Compatibility
`````````````
The ``funcsigs`` backport has been tested against:
* CPython 2.6
* CPython 2.7
* CPython 3.3
* CPython 3.4
* CPython 3.5
* CPython nightlies
* PyPy and PyPy3 (currently failing CI)
Continuous integration testing is provided by `Travis CI`_.
Under Python 2.x there is a compatibility issue when a function is assigned to
the ``__wrapped__`` property of a class after it has been constructed.
Similarly, under PyPy, directly passing the ``__call__`` method of a builtin is
also a compatibility issue. Otherwise the functionality is believed to be
uniform between Python 2 and Python 3.
Issues
``````
Source code for ``funcsigs`` is hosted on `GitHub`_. Any bug reports or feature
requests can be made using GitHub's `issues system`_. |build_status| |coverage|
Example
-------
To obtain a `Signature` object, pass the target function to the
``funcsigs.signature`` function.
.. code-block:: python
>>> from funcsigs import signature
>>> def foo(a, b=None, *args, **kwargs):
... pass
...
>>> sig = signature(foo)
>>> sig
<funcsigs.Signature object at 0x...>
>>> sig.parameters
OrderedDict([('a', <Parameter at 0x... 'a'>), ('b', <Parameter at 0x... 'b'>), ('args', <Parameter at 0x... 'args'>), ('kwargs', <Parameter at 0x... 'kwargs'>)])
>>> sig.return_annotation
<class 'funcsigs._empty'>
Introspecting callables with the Signature object
-------------------------------------------------
.. note::
This section of documentation is a direct reproduction of the Python
standard library documentation for the inspect module.
The Signature object represents the call signature of a callable object and its
return annotation. To retrieve a Signature object, use the :func:`signature`
function.
.. function:: signature(callable)
Return a :class:`Signature` object for the given ``callable``::
>>> from funcsigs import signature
>>> def foo(a, *, b:int, **kwargs):
... pass
>>> sig = signature(foo)
>>> str(sig)
'(a, *, b:int, **kwargs)'
>>> str(sig.parameters['b'])
'b:int'
>>> sig.parameters['b'].annotation
<class 'int'>
Accepts a wide range of python callables, from plain functions and classes to
:func:`functools.partial` objects.
.. note::
Some callables may not be introspectable in certain implementations of
Python. For example, in CPython, built-in functions defined in C provide
no metadata about their arguments.
.. class:: Signature
A Signature object represents the call signature of a function and its return
annotation. For each parameter accepted by the function it stores a
:class:`Parameter` object in its :attr:`parameters` collection.
Signature objects are *immutable*. Use :meth:`Signature.replace` to make a
modified copy.
.. attribute:: Signature.empty
A special class-level marker to specify absence of a return annotation.
.. attribute:: Signature.parameters
An ordered mapping of parameters' names to the corresponding
:class:`Parameter` objects.
.. attribute:: Signature.return_annotation
The "return" annotation for the callable. If the callable has no "return"
annotation, this attribute is set to :attr:`Signature.empty`.
.. method:: Signature.bind(*args, **kwargs)
Create a mapping from positional and keyword arguments to parameters.
Returns :class:`BoundArguments` if ``*args`` and ``**kwargs`` match the
signature, or raises a :exc:`TypeError`.
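    For example (a minimal sketch; the function ``f`` is arbitrary)::

        >>> def f(a, b=1):
        ...     pass
        >>> ba = signature(f).bind(2, b=3)
        >>> ba.arguments
        OrderedDict([('a', 2), ('b', 3)])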
.. method:: Signature.bind_partial(*args, **kwargs)
Works the same way as :meth:`Signature.bind`, but allows the omission of
some required arguments (mimics :func:`functools.partial` behavior.)
Returns :class:`BoundArguments`, or raises a :exc:`TypeError` if the
passed arguments do not match the signature.
.. method:: Signature.replace(*[, parameters][, return_annotation])
Create a new Signature instance based on the instance replace was invoked
on. It is possible to pass different ``parameters`` and/or
``return_annotation`` to override the corresponding properties of the base
signature. To remove return_annotation from the copied Signature, pass in
:attr:`Signature.empty`.
::
>>> def test(a, b):
... pass
>>> sig = signature(test)
>>> new_sig = sig.replace(return_annotation="new return anno")
>>> str(new_sig)
"(a, b) -> 'new return anno'"
.. class:: Parameter
Parameter objects are *immutable*. Instead of modifying a Parameter object,
you can use :meth:`Parameter.replace` to create a modified copy.
.. attribute:: Parameter.empty
A special class-level marker to specify absence of default values and
annotations.
.. attribute:: Parameter.name
The name of the parameter as a string. Must be a valid python identifier
name (with the exception of ``POSITIONAL_ONLY`` parameters, which can have
it set to ``None``).
.. attribute:: Parameter.default
The default value for the parameter. If the parameter has no default
value, this attribute is set to :attr:`Parameter.empty`.
.. attribute:: Parameter.annotation
The annotation for the parameter. If the parameter has no annotation,
this attribute is set to :attr:`Parameter.empty`.
.. attribute:: Parameter.kind
Describes how argument values are bound to the parameter. Possible values
(accessible via :class:`Parameter`, like ``Parameter.KEYWORD_ONLY``):
+------------------------+----------------------------------------------+
| Name | Meaning |
+========================+==============================================+
| *POSITIONAL_ONLY* | Value must be supplied as a positional |
| | argument. |
| | |
| | Python has no explicit syntax for defining |
| | positional-only parameters, but many built-in|
| | and extension module functions (especially |
| | those that accept only one or two parameters)|
| | accept them. |
+------------------------+----------------------------------------------+
| *POSITIONAL_OR_KEYWORD*| Value may be supplied as either a keyword or |
| | positional argument (this is the standard |
| | binding behaviour for functions implemented |
| | in Python.) |
+------------------------+----------------------------------------------+
| *VAR_POSITIONAL* | A tuple of positional arguments that aren't |
| | bound to any other parameter. This |
| | corresponds to a ``*args`` parameter in a |
| | Python function definition. |
+------------------------+----------------------------------------------+
| *KEYWORD_ONLY* | Value must be supplied as a keyword argument.|
| | Keyword only parameters are those which |
| | appear after a ``*`` or ``*args`` entry in a |
| | Python function definition. |
+------------------------+----------------------------------------------+
| *VAR_KEYWORD* | A dict of keyword arguments that aren't bound|
| | to any other parameter. This corresponds to a|
| | ``**kwargs`` parameter in a Python function |
| | definition. |
+------------------------+----------------------------------------------+
Example: print all keyword-only arguments without default values::
>>> def foo(a, b, *, c, d=10):
... pass
>>> sig = signature(foo)
>>> for param in sig.parameters.values():
... if (param.kind == param.KEYWORD_ONLY and
... param.default is param.empty):
... print('Parameter:', param)
Parameter: c
.. method:: Parameter.replace(*[, name][, kind][, default][, annotation])
    Create a new Parameter instance based on the instance :meth:`replace` was
    invoked on. To override a :class:`Parameter` attribute, pass the
    corresponding argument. To remove a default value and/or an annotation
    from a Parameter, pass :attr:`Parameter.empty`.
::
>>> from funcsigs import Parameter
>>> param = Parameter('foo', Parameter.KEYWORD_ONLY, default=42)
>>> str(param)
'foo=42'
>>> str(param.replace()) # Will create a shallow copy of 'param'
'foo=42'
>>> str(param.replace(default=Parameter.empty, annotation='spam'))
"foo:'spam'"
.. class:: BoundArguments
Result of a :meth:`Signature.bind` or :meth:`Signature.bind_partial` call.
Holds the mapping of arguments to the function's parameters.
.. attribute:: BoundArguments.arguments
An ordered, mutable mapping (:class:`collections.OrderedDict`) of
parameters' names to arguments' values. Contains only explicitly bound
arguments. Changes in :attr:`arguments` will reflect in :attr:`args` and
:attr:`kwargs`.
Should be used in conjunction with :attr:`Signature.parameters` for any
argument processing purposes.
.. note::
Arguments for which :meth:`Signature.bind` or
:meth:`Signature.bind_partial` relied on a default value are skipped.
However, if needed, it is easy to include them.
::
>>> def foo(a, b=10):
... pass
>>> sig = signature(foo)
>>> ba = sig.bind(5)
>>> ba.args, ba.kwargs
((5,), {})
>>> for param in sig.parameters.values():
... if param.name not in ba.arguments:
... ba.arguments[param.name] = param.default
>>> ba.args, ba.kwargs
((5, 10), {})
.. attribute:: BoundArguments.args
A tuple of positional arguments values. Dynamically computed from the
:attr:`arguments` attribute.
.. attribute:: BoundArguments.kwargs
A dict of keyword arguments values. Dynamically computed from the
:attr:`arguments` attribute.
The :attr:`args` and :attr:`kwargs` properties can be used to invoke
functions::
def test(a, *, b):
...
sig = signature(test)
ba = sig.bind(10, b=20)
test(*ba.args, **ba.kwargs)
.. seealso::
:pep:`362` - Function Signature Object.
The detailed specification, implementation details and examples.
Copyright
---------
*funcsigs* is a derived work of CPython under the terms of the `PSF License
Agreement`_. The original CPython inspect module, its unit tests and
documentation are the copyright of the Python Software Foundation. The derived
work is distributed under the `Apache License Version 2.0`_.
.. _PSF License Agreement: http://docs.python.org/3/license.html#terms-and-conditions-for-accessing-or-otherwise-using-python
.. _Apache License Version 2.0: http://opensource.org/licenses/Apache-2.0
.. _GitHub: https://github.com/testing-cabal/funcsigs
.. _Travis CI: http://travis-ci.org/
.. _Read The Docs: http://funcsigs.readthedocs.org/
.. _PEP 362: http://www.python.org/dev/peps/pep-0362/
.. _inspect: http://docs.python.org/3/library/inspect.html#introspecting-callables-with-the-signature-object
.. _issues system: https://github.com/testing-cabal/funcsigs/issues
.. |build_status| image:: https://secure.travis-ci.org/aliles/funcsigs.png?branch=master
:target: http://travis-ci.org/#!/aliles/funcsigs
:alt: Current build status
.. |coverage| image:: https://coveralls.io/repos/aliles/funcsigs/badge.png?branch=master
:target: https://coveralls.io/r/aliles/funcsigs?branch=master
:alt: Coverage status
.. |pypi_version| image:: https://pypip.in/v/funcsigs/badge.png
:target: https://crate.io/packages/funcsigs/
:alt: Latest PyPI version
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/funcsigs/docs/index.rst
| 0.797754 | 0.665399 |
index.rst
|
pypi
|
:mod:`crcmod` -- CRC calculation
================================
.. module:: crcmod
:synopsis: CRC calculation
.. moduleauthor:: Raymond L Buvel
.. sectionauthor:: Craig McQueen
This module provides a function factory :func:`mkCrcFun` and a class :class:`Crc`
for calculating CRCs of byte strings using common CRC algorithms.
.. note:: This documentation normally shows Python 2.x usage. Python 3.x usage is very similar,
with the main difference that input strings must be explicitly defined as
:keyword:`bytes` type, or an object that supports the buffer protocol. E.g.::
>>> crc_value = crc_function(b'123456789')
>>> crc_value = crc_function(bytearray((49, 50, 51, 52, 53, 54, 55, 56, 57)))
:func:`mkCrcFun` -- CRC function factory
----------------------------------------
The function factory provides a simple interface for CRC calculation.
.. function:: mkCrcFun(poly[, initCrc, rev, xorOut])
Function factory that returns a new function for calculating CRCs
using a specified CRC algorithm.
:param poly: The generator polynomial to use in calculating the CRC. The value
is specified as a Python integer or long integer. The bits in this integer
are the coefficients of the polynomial. The only polynomials allowed are
those that generate 8, 16, 24, 32, or 64 bit CRCs.
:param initCrc: Initial value used to start the CRC calculation. This initial
value should be the initial shift register value, reversed if it uses a
reversed algorithm, and then XORed with the final XOR value. That is
equivalent to the CRC result the algorithm should return for a
zero-length string. Defaults to all bits set because that starting value
will take leading zero bytes into account. Starting with zero will ignore
all leading zero bytes.
:param rev: A flag that selects a bit reversed algorithm when :keyword:`True`. Defaults to
:keyword:`True` because the bit reversed algorithms are more efficient.
:param xorOut: Final value to XOR with the calculated CRC value. Used by some
CRC algorithms. Defaults to zero.
:return: CRC calculation function
:rtype: function
The function that is returned is as follows:
.. function:: .crc_function(data[, crc=initCrc])
:param data: Data for which to calculate the CRC.
:type data: byte string
:param crc: Initial CRC value.
:return: Calculated CRC value.
:rtype: integer
Examples
^^^^^^^^
**CRC-32** Example::
>>> import crcmod
>>> crc32_func = crcmod.mkCrcFun(0x104c11db7, initCrc=0, xorOut=0xFFFFFFFF)
>>> hex(crc32_func('123456789'))
'0xcbf43926L'
The CRC-32 uses a "reversed" algorithm, used for many common CRC algorithms.
Less common is the non-reversed algorithm, as used by the 16-bit **XMODEM** CRC::
>>> xmodem_crc_func = crcmod.mkCrcFun(0x11021, rev=False, initCrc=0x0000, xorOut=0x0000)
>>> hex(xmodem_crc_func('123456789'))
'0x31c3'
The CRC function can be called multiple times. On subsequent calls, pass the
CRC value previously calculated as a second parameter::
>>> crc_value = crc32_func('1234')
>>> crc_value = crc32_func('56789', crc_value)
>>> hex(crc_value)
'0xcbf43926L'
Python 3.x example: Unicode strings are not accepted as input. Byte strings are acceptable.
You may calculate a CRC for an object that implements the buffer protocol::
>>> import crcmod
>>> crc32_func = crcmod.mkCrcFun(0x104c11db7, initCrc=0, xorOut=0xFFFFFFFF)
>>> hex(crc32_func('123456789'))
...
TypeError: Unicode-objects must be encoded before calculating a CRC
>>> hex(crc32_func(b'123456789'))
'0xcbf43926'
>>> hex(crc32_func(bytearray((49, 50, 51, 52, 53, 54, 55, 56, 57))))
'0xcbf43926'
Class :class:`Crc`
------------------
The class provides an interface similar to the Python :mod:`hashlib`, :mod:`md5` and :mod:`sha` modules.
.. class:: Crc(poly[, initCrc, rev, xorOut])
Returns a new :class:`Crc` object for calculating CRCs using a specified CRC algorithm.
The parameters are the same as those for the factory function :func:`mkCrcFun`.
:param poly: The generator polynomial to use in calculating the CRC. The value
is specified as a Python integer or long integer. The bits in this integer
are the coefficients of the polynomial. The only polynomials allowed are
those that generate 8, 16, 24, 32, or 64 bit CRCs.
:param initCrc: Initial value used to start the CRC calculation. This initial
value should be the initial shift register value, reversed if it uses a
reversed algorithm, and then XORed with the final XOR value. That is
equivalent to the CRC result the algorithm should return for a
zero-length string. Defaults to all bits set because that starting value
will take leading zero bytes into account. Starting with zero will ignore
all leading zero bytes.
:param rev: A flag that selects a bit reversed algorithm when :keyword:`True`. Defaults to
:keyword:`True` because the bit reversed algorithms are more efficient.
:param xorOut: Final value to XOR with the calculated CRC value. Used by some
CRC algorithms. Defaults to zero.
:class:`Crc` objects contain the following constant values:
.. attribute:: digest_size
The size of the resulting digest in bytes. This depends on the width of the CRC polynomial.
E.g. for a 32-bit CRC, :data:`digest_size` will be ``4``.
.. attribute:: crcValue
The calculated CRC value, as an integer, for the data that has been input
using :meth:`update`. This value is updated after each call to :meth:`update`.
:class:`Crc` objects support the following methods:
.. method:: new([arg])
Create a new instance of the :class:`Crc` class initialized to the same
values as the original instance. The CRC value is reset to the initial
value. If a byte string is provided in the optional ``arg`` parameter, it is
passed to the :meth:`update` method.
.. method:: copy()
Create a new instance of the :class:`Crc` class initialized to the same
values as the original instance. The CRC value is copied from the current
value. This allows multiple CRC calculations using a common initial
string.
.. method:: update(data)
:param data: Data for which to calculate the CRC
:type data: byte string
Update the calculated CRC value for the specified input data.
.. method:: digest()
Return the current CRC value as a string of bytes. The length of
this string is specified in the :attr:`digest_size` attribute.
.. method:: hexdigest()
Return the current CRC value as a string of hex digits. The length
of this string is twice the :attr:`digest_size` attribute.
.. method:: generateCode(functionName, out[, dataType, crcType])
Generate a C/C++ function.
:param functionName: String specifying the name of the function.
:param out: An open file-like object with a write method.
This specifies where the generated code is written.
:param dataType: An optional parameter specifying the data type of the input
data to the function. Defaults to ``UINT8``.
:param crcType: An optional parameter specifying the data type of the CRC value.
Defaults to one of ``UINT8``, ``UINT16``, ``UINT32``, or ``UINT64`` depending
on the size of the CRC value.
Examples
^^^^^^^^
**CRC-32** Example::
>>> import crcmod
>>> crc32 = crcmod.Crc(0x104c11db7, initCrc=0, xorOut=0xFFFFFFFF)
>>> crc32.update(b'123456789')
>>> hex(crc32.crcValue)
'0xcbf43926'
>>> crc32.hexdigest()
'CBF43926'
A fresh calculation can be started with :meth:`new`; :meth:`Crc.update` can then
be called multiple times, and the CRC value is updated with each call::
>>> crc32new = crc32.new()
>>> crc32new.update(b'1234')
>>> crc32new.hexdigest()
'9BE3E0A3'
>>> crc32new.update(b'56789')
>>> crc32new.hexdigest()
'CBF43926'
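:meth:`generateCode` writes a C/C++ implementation of the configured algorithm
to any object with a ``write`` method. A minimal sketch, using
:class:`io.StringIO` to capture the output (the function name here is
arbitrary, and the exact generated text varies between versions)::
>>> import io
>>> out = io.StringIO()
>>> crc32.generateCode('crc32_table_driven', out)
>>> c_source = out.getvalue()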
[file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/crcmod/docs/source/crcmod.rst (source: pypi)]
def _get_buffer_view(in_obj):
    # Reject text outright and require a flat, byte-oriented view of the input.
    if isinstance(in_obj, str):
        raise TypeError('Unicode-objects must be encoded before calculating a CRC')
    mv = memoryview(in_obj)
    if mv.ndim > 1:
        raise BufferError('Buffer must be single dimension')
    return mv
def _crc8(data, crc, table):
    # Table-driven CRC-8, MSB-first (non-reversed) algorithm.
    mv = _get_buffer_view(data)
    crc = crc & 0xFF
    for x in mv.tobytes():
        crc = table[x ^ crc]
    return crc

def _crc8r(data, crc, table):
    # Reflected (LSB-first) CRC-8. For an 8-bit register no shifting is
    # needed, so the loop is identical to _crc8; the reflection lives
    # entirely in the precomputed table.
    mv = _get_buffer_view(data)
    crc = crc & 0xFF
    for x in mv.tobytes():
        crc = table[x ^ crc]
    return crc
def _crc16(data, crc, table):
    # MSB-first: the top byte of the register indexes the table.
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFF
    for x in mv.tobytes():
        crc = table[x ^ ((crc >> 8) & 0xFF)] ^ ((crc << 8) & 0xFF00)
    return crc

def _crc16r(data, crc, table):
    # Reflected (LSB-first): the low byte of the register indexes the table.
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFF
    for x in mv.tobytes():
        crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
    return crc
def _crc24(data, crc, table):
    # Same MSB-first scheme widened to 24 bits.
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFFFF
    for x in mv.tobytes():
        crc = table[x ^ ((crc >> 16) & 0xFF)] ^ ((crc << 8) & 0xFFFF00)
    return crc

def _crc24r(data, crc, table):
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFFFF
    for x in mv.tobytes():
        crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
    return crc

def _crc32(data, crc, table):
    # Same MSB-first scheme widened to 32 bits.
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFFFFFF
    for x in mv.tobytes():
        crc = table[x ^ ((crc >> 24) & 0xFF)] ^ ((crc << 8) & 0xFFFFFF00)
    return crc

def _crc32r(data, crc, table):
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFFFFFF
    for x in mv.tobytes():
        crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
    return crc

def _crc64(data, crc, table):
    # Same MSB-first scheme widened to 64 bits.
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFFFFFFFFFFFFFF
    for x in mv.tobytes():
        crc = table[x ^ ((crc >> 56) & 0xFF)] ^ ((crc << 8) & 0xFFFFFFFFFFFFFF00)
    return crc

def _crc64r(data, crc, table):
    mv = _get_buffer_view(data)
    crc = crc & 0xFFFFFFFFFFFFFFFF
    for x in mv.tobytes():
        crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
    return crc
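# --- Usage sketch (editor's addition; not part of the upstream module) ---
# The loops above expect a precomputed 256-entry table, which crcmod normally
# builds in its factory code. The inline table construction below is the
# standard one for a reflected (LSB-first) polynomial and is included only to
# show how _crc32r is driven; the helper name is illustrative.
def _demo_crc32r():
    poly = 0xEDB88320  # bit-reflected form of the CRC-32 polynomial 0x04C11DB7
    table = []
    for byte in range(256):
        reg = byte
        for _ in range(8):
            reg = (reg >> 1) ^ (poly if reg & 1 else 0)
        table.append(reg)
    # Standard CRC-32: initial register of all ones, final XOR with all ones.
    crc = _crc32r(b'123456789', 0xFFFFFFFF, table) ^ 0xFFFFFFFF
    assert crc == 0xCBF43926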
[file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/crcmod/python3/crcmod/_crcfunpy.py (source: pypi)]
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
maxInt = univ.Integer(2147483647)
class LDAPString(univ.OctetString):
pass
class LDAPOID(univ.OctetString):
pass
class LDAPDN(LDAPString):
pass
class RelativeLDAPDN(LDAPString):
pass
class AttributeType(LDAPString):
pass
class AttributeDescription(LDAPString):
pass
class AttributeDescriptionList(univ.SequenceOf):
componentType = AttributeDescription()
class AttributeValue(univ.OctetString):
pass
class AssertionValue(univ.OctetString):
pass
class AttributeValueAssertion(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('attributeDesc', AttributeDescription()),
namedtype.NamedType('assertionValue', AssertionValue())
)
class Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
)
class MatchingRuleId(LDAPString):
pass
class Control(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('controlType', LDAPOID()),
namedtype.DefaultedNamedType('criticality', univ.Boolean('False')),
namedtype.OptionalNamedType('controlValue', univ.OctetString())
)
class Controls(univ.SequenceOf):
componentType = Control()
class LDAPURL(LDAPString):
pass
class Referral(univ.SequenceOf):
componentType = LDAPURL()
class SaslCredentials(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('mechanism', LDAPString()),
namedtype.OptionalNamedType('credentials', univ.OctetString())
)
class AuthenticationChoice(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('simple', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('reserved-1', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('reserved-2', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('sasl',
SaslCredentials().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class BindRequest(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 127))),
namedtype.NamedType('name', LDAPDN()),
namedtype.NamedType('authentication', AuthenticationChoice())
)
class PartialAttributeList(univ.SequenceOf):
componentType = univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
)
)
class SearchResultEntry(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 4)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('objectName', LDAPDN()),
namedtype.NamedType('attributes', PartialAttributeList())
)
class MatchingRuleAssertion(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('matchingRule', MatchingRuleId().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('type', AttributeDescription().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('matchValue',
AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.DefaultedNamedType('dnAttributes', univ.Boolean('False').subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
)
class SubstringFilter(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('substrings',
univ.SequenceOf(
componentType=univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType(
'initial', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType(
'any', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
namedtype.NamedType(
'final', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))
)
)
)
)
)
)
# Ugly hack to handle recursive Filter reference (up to 3-levels deep).
class Filter3(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.NamedType('substrings', SubstringFilter().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
namedtype.NamedType('present', AttributeDescription().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
)
class Filter2(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('and', univ.SetOf(componentType=Filter3()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('or', univ.SetOf(componentType=Filter3()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('not',
Filter3().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.NamedType('substrings', SubstringFilter().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
namedtype.NamedType('present', AttributeDescription().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
)
class Filter(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('and', univ.SetOf(componentType=Filter2()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('or', univ.SetOf(componentType=Filter2()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('not',
Filter2().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.NamedType('substrings', SubstringFilter().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
namedtype.NamedType('present', AttributeDescription().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
)
# End of Filter hack
class SearchRequest(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 3)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('baseObject', LDAPDN()),
namedtype.NamedType('scope', univ.Enumerated(
namedValues=namedval.NamedValues(('baseObject', 0), ('singleLevel', 1), ('wholeSubtree', 2)))),
namedtype.NamedType('derefAliases', univ.Enumerated(
namedValues=namedval.NamedValues(('neverDerefAliases', 0), ('derefInSearching', 1),
('derefFindingBaseObj', 2), ('derefAlways', 3)))),
namedtype.NamedType('sizeLimit',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
namedtype.NamedType('timeLimit',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
namedtype.NamedType('typesOnly', univ.Boolean()),
namedtype.NamedType('filter', Filter()),
namedtype.NamedType('attributes', AttributeDescriptionList())
)
class UnbindRequest(univ.Null):
    # RFC 2251: UnbindRequest ::= [APPLICATION 2] NULL. Tag the Null base,
    # not Sequence, so the implicit tag keeps the primitive encoding.
    tagSet = univ.Null.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
    )
class BindResponse(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('resultCode', univ.Enumerated(
namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
('compareTrue', 6), ('authMethodNotSupported', 7),
('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
('confidentialityRequired', 13), ('saslBindInProgress', 14),
('noSuchAttribute', 16), ('undefinedAttributeType', 17),
('inappropriateMatching', 18), ('constraintViolation', 19),
('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
('reserved-35', 35), ('aliasDereferencingProblem', 36),
('inappropriateAuthentication', 48), ('invalidCredentials', 49),
('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
('objectClassModsProhibited', 69), ('reserved-70', 70),
('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
namedtype.NamedType('matchedDN', LDAPDN()),
namedtype.NamedType('errorMessage', LDAPString()),
namedtype.OptionalNamedType('referral', Referral().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.OptionalNamedType('serverSaslCreds', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)))
)
class LDAPResult(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('resultCode', univ.Enumerated(
namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
('compareTrue', 6), ('authMethodNotSupported', 7),
('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
('confidentialityRequired', 13), ('saslBindInProgress', 14),
('noSuchAttribute', 16), ('undefinedAttributeType', 17),
('inappropriateMatching', 18), ('constraintViolation', 19),
('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
('reserved-35', 35), ('aliasDereferencingProblem', 36),
('inappropriateAuthentication', 48), ('invalidCredentials', 49),
('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
('objectClassModsProhibited', 69), ('reserved-70', 70),
('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
namedtype.NamedType('matchedDN', LDAPDN()),
namedtype.NamedType('errorMessage', LDAPString()),
namedtype.OptionalNamedType('referral', Referral().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
)
class SearchResultReference(univ.SequenceOf):
    tagSet = univ.SequenceOf.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 19)
    )
    componentType = LDAPURL()
class SearchResultDone(LDAPResult):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5)
)
class AttributeTypeAndValues(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
)
class ModifyRequest(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 6)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('object', LDAPDN()),
namedtype.NamedType('modification',
univ.SequenceOf(
componentType=univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType(
'operation', univ.Enumerated(namedValues=namedval.NamedValues(('add', 0), ('delete', 1), ('replace', 2)))
),
namedtype.NamedType('modification', AttributeTypeAndValues())))
)
)
)
class ModifyResponse(LDAPResult):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 7)
)
class AttributeList(univ.SequenceOf):
componentType = univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
)
)
class AddRequest(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 8)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('entry', LDAPDN()),
namedtype.NamedType('attributes', AttributeList())
)
class AddResponse(LDAPResult):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 9)
)
class DelRequest(LDAPResult):
    # Note: RFC 2251 defines DelRequest as [APPLICATION 10] LDAPDN; deriving
    # from LDAPResult here does not match that ASN.1 definition.
    tagSet = univ.Sequence.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 10)
    )
class DelResponse(LDAPResult):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 11)
)
class ModifyDNRequest(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 12)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('entry', LDAPDN()),
namedtype.NamedType('newrdn', RelativeLDAPDN()),
namedtype.NamedType('deleteoldrdn', univ.Boolean()),
namedtype.OptionalNamedType('newSuperior',
LDAPDN().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class ModifyDNResponse(LDAPResult):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 13)
)
class CompareRequest(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 14)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('entry', LDAPDN()),
namedtype.NamedType('ava', AttributeValueAssertion())
)
class CompareResponse(LDAPResult):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 15)
)
class AbandonRequest(LDAPResult):
    # Note: RFC 2251 defines AbandonRequest as [APPLICATION 16] MessageID;
    # deriving from LDAPResult here does not match that ASN.1 definition.
    tagSet = univ.Sequence.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 16)
    )
class ExtendedRequest(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 23)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('requestName',
LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('requestValue', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtendedResponse(univ.Sequence):
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 24)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('resultCode', univ.Enumerated(
namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
('compareTrue', 6), ('authMethodNotSupported', 7),
('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
('confidentialityRequired', 13), ('saslBindInProgress', 14),
('noSuchAttribute', 16), ('undefinedAttributeType', 17),
('inappropriateMatching', 18), ('constraintViolation', 19),
('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
('reserved-35', 35), ('aliasDereferencingProblem', 36),
('inappropriateAuthentication', 48), ('invalidCredentials', 49),
('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
('objectClassModsProhibited', 69), ('reserved-70', 70),
('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
namedtype.NamedType('matchedDN', LDAPDN()),
namedtype.NamedType('errorMessage', LDAPString()),
namedtype.OptionalNamedType('referral', Referral().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.OptionalNamedType('responseName', LDAPOID().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))),
namedtype.OptionalNamedType('response', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11)))
)
class MessageID(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, maxInt
)
class LDAPMessage(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('messageID', MessageID()),
namedtype.NamedType(
'protocolOp', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('bindRequest', BindRequest()),
namedtype.NamedType('bindResponse', BindResponse()),
namedtype.NamedType('unbindRequest', UnbindRequest()),
namedtype.NamedType('searchRequest', SearchRequest()),
namedtype.NamedType('searchResEntry', SearchResultEntry()),
namedtype.NamedType('searchResDone', SearchResultDone()),
namedtype.NamedType('searchResRef', SearchResultReference()),
namedtype.NamedType('modifyRequest', ModifyRequest()),
namedtype.NamedType('modifyResponse', ModifyResponse()),
namedtype.NamedType('addRequest', AddRequest()),
namedtype.NamedType('addResponse', AddResponse()),
namedtype.NamedType('delRequest', DelRequest()),
namedtype.NamedType('delResponse', DelResponse()),
namedtype.NamedType('modDNRequest', ModifyDNRequest()),
namedtype.NamedType('modDNResponse', ModifyDNResponse()),
namedtype.NamedType('compareRequest', CompareRequest()),
namedtype.NamedType('compareResponse', CompareResponse()),
namedtype.NamedType('abandonRequest', AbandonRequest()),
namedtype.NamedType('extendedReq', ExtendedRequest()),
namedtype.NamedType('extendedResp', ExtendedResponse())
)
)
),
namedtype.OptionalNamedType('controls', Controls().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
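# --- Usage sketch (editor's addition; not part of the upstream module) ---
# Building and BER-encoding an LDAPMessage that carries a simple-bind
# request. Assumes pyasn1 0.4+, where subscript access auto-instantiates
# nested components; the DN and password are placeholders.
def _demo_bind_message():
    from pyasn1.codec.ber import decoder, encoder
    bind = BindRequest()
    bind['version'] = 3
    bind['name'] = 'cn=admin,dc=example,dc=com'
    bind['authentication']['simple'] = 'secret'
    msg = LDAPMessage()
    msg['messageID'] = 1
    msg['protocolOp']['bindRequest'] = bind
    substrate = encoder.encode(msg)
    decoded, rest = decoder.decode(substrate, asn1Spec=LDAPMessage())
    assert not rest and decoded['messageID'] == 1
    return substrate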
[file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc2251.py (source: pypi)]
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc3280
from pyasn1_modules import rfc3281
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
class AttributeValue(univ.Any):
pass
class Attribute(univ.Sequence):
pass
Attribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('attrType', univ.ObjectIdentifier()),
namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()))
)
class SignedAttributes(univ.SetOf):
pass
SignedAttributes.componentType = Attribute()
SignedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class OtherRevocationInfoFormat(univ.Sequence):
pass
OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
namedtype.NamedType('otherRevInfo', univ.Any())
)
class RevocationInfoChoice(univ.Choice):
pass
RevocationInfoChoice.componentType = namedtype.NamedTypes(
namedtype.NamedType('crl', rfc3280.CertificateList()),
namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class RevocationInfoChoices(univ.SetOf):
pass
RevocationInfoChoices.componentType = RevocationInfoChoice()
class OtherKeyAttribute(univ.Sequence):
pass
OtherKeyAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('keyAttr', univ.Any())
)
id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
class KeyEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
pass
class EncryptedKey(univ.OctetString):
pass
class CMSVersion(univ.Integer):
pass
CMSVersion.namedValues = namedval.NamedValues(
('v0', 0),
('v1', 1),
('v2', 2),
('v3', 3),
('v4', 4),
('v5', 5)
)
class KEKIdentifier(univ.Sequence):
pass
KEKIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyIdentifier', univ.OctetString()),
namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
namedtype.OptionalNamedType('other', OtherKeyAttribute())
)
class KEKRecipientInfo(univ.Sequence):
pass
KEKRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('kekid', KEKIdentifier()),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class KeyDerivationAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
pass
class PasswordRecipientInfo(univ.Sequence):
pass
PasswordRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class OtherRecipientInfo(univ.Sequence):
pass
OtherRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('oriType', univ.ObjectIdentifier()),
namedtype.NamedType('oriValue', univ.Any())
)
class IssuerAndSerialNumber(univ.Sequence):
pass
IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', rfc3280.Name()),
namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber())
)
class SubjectKeyIdentifier(univ.OctetString):
pass
class RecipientKeyIdentifier(univ.Sequence):
pass
RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
namedtype.OptionalNamedType('other', OtherKeyAttribute())
)
class KeyAgreeRecipientIdentifier(univ.Choice):
pass
KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class RecipientEncryptedKey(univ.Sequence):
pass
RecipientEncryptedKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class RecipientEncryptedKeys(univ.SequenceOf):
pass
RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
class UserKeyingMaterial(univ.OctetString):
pass
class OriginatorPublicKey(univ.Sequence):
pass
OriginatorPublicKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('publicKey', univ.BitString())
)
class OriginatorIdentifierOrKey(univ.Choice):
pass
OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class KeyAgreeRecipientInfo(univ.Sequence):
pass
KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
)
class RecipientIdentifier(univ.Choice):
pass
RecipientIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class KeyTransRecipientInfo(univ.Sequence):
pass
KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('rid', RecipientIdentifier()),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class RecipientInfo(univ.Choice):
pass
RecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('ktri', KeyTransRecipientInfo()),
namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.NamedType('ori', OtherRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
)
class RecipientInfos(univ.SetOf):
pass
RecipientInfos.componentType = RecipientInfo()
RecipientInfos.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class DigestAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
pass
class Signature(univ.BitString):
pass
class SignerIdentifier(univ.Choice):
pass
SignerIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class UnprotectedAttributes(univ.SetOf):
pass
UnprotectedAttributes.componentType = Attribute()
UnprotectedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class ContentType(univ.ObjectIdentifier):
pass
class EncryptedContent(univ.OctetString):
pass
class ContentEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
pass
class EncryptedContentInfo(univ.Sequence):
pass
EncryptedContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class EncryptedData(univ.Sequence):
pass
EncryptedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
class DigestAlgorithmIdentifiers(univ.SetOf):
pass
DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
class EncapsulatedContentInfo(univ.Sequence):
pass
EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('eContentType', ContentType()),
namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Digest(univ.OctetString):
pass
class DigestedData(univ.Sequence):
pass
DigestedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
namedtype.NamedType('digest', Digest())
)
class ContentInfo(univ.Sequence):
pass
ContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class UnauthAttributes(univ.SetOf):
pass
UnauthAttributes.componentType = Attribute()
UnauthAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class ExtendedCertificateInfo(univ.Sequence):
pass
ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('certificate', rfc3280.Certificate()),
namedtype.NamedType('attributes', UnauthAttributes())
)
class SignatureAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
pass
class ExtendedCertificate(univ.Sequence):
pass
ExtendedCertificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
namedtype.NamedType('signature', Signature())
)
class OtherCertificateFormat(univ.Sequence):
pass
OtherCertificateFormat.componentType = namedtype.NamedTypes(
namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
namedtype.NamedType('otherCert', univ.Any())
)
class AttributeCertificateV2(rfc3281.AttributeCertificate):
pass
class AttCertVersionV1(univ.Integer):
pass
AttCertVersionV1.namedValues = namedval.NamedValues(
('v1', 0)
)
class AttributeCertificateInfoV1(univ.Sequence):
pass
AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
namedtype.NamedType(
'subject', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('subjectName', rfc3280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
)
),
namedtype.NamedType('issuer', rfc3280.GeneralNames()),
namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
)
class AttributeCertificateV1(univ.Sequence):
pass
AttributeCertificateV1.componentType = namedtype.NamedTypes(
namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class CertificateChoices(univ.Choice):
pass
CertificateChoices.componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', rfc3280.Certificate()),
namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('other', OtherCertificateFormat().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
)
class CertificateSet(univ.SetOf):
pass
CertificateSet.componentType = CertificateChoices()
class MessageAuthenticationCode(univ.OctetString):
pass
class UnsignedAttributes(univ.SetOf):
pass
UnsignedAttributes.componentType = Attribute()
UnsignedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class SignatureValue(univ.OctetString):
pass
class SignerInfo(univ.Sequence):
pass
SignerInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('sid', SignerIdentifier()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
namedtype.NamedType('signature', SignatureValue()),
namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class SignerInfos(univ.SetOf):
pass
SignerInfos.componentType = SignerInfo()
class SignedData(univ.Sequence):
pass
SignedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
class MessageAuthenticationCodeAlgorithm(rfc3280.AlgorithmIdentifier):
pass
class MessageDigest(univ.OctetString):
pass
class Time(univ.Choice):
pass
Time.componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
class OriginatorInfo(univ.Sequence):
pass
OriginatorInfo.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('certs', CertificateSet().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class AuthAttributes(univ.SetOf):
pass
AuthAttributes.componentType = Attribute()
AuthAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class AuthenticatedData(univ.Sequence):
pass
AuthenticatedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('mac', MessageAuthenticationCode()),
namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
class EnvelopedData(univ.Sequence):
pass
EnvelopedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class Countersignature(SignerInfo):
pass
id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
class ExtendedCertificateOrCertificate(univ.Choice):
pass
ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', rfc3280.Certificate()),
namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
class SigningTime(Time):
pass
id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
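# --- Usage sketch (editor's addition; not part of the upstream module) ---
# CMS messages travel as a ContentInfo wrapper: 'contentType' names the inner
# structure and 'content' holds its encoding. A typical two-step decode of a
# DER-encoded signed message (`der_bytes` is a placeholder input):
def _demo_decode_signed(der_bytes):
    from pyasn1.codec.der import decoder
    content_info, rest = decoder.decode(der_bytes, asn1Spec=ContentInfo())
    assert not rest and content_info['contentType'] == id_signedData
    signed_data, _ = decoder.decode(content_info['content'],
                                    asn1Spec=SignedData())
    return signed_data['signerInfos']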
[file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc3852.py (source: pypi)]
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
class Integer(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
-2147483648, 2147483647
)
class Integer32(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
-2147483648, 2147483647
)
class OctetString(univ.OctetString):
    subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
        0, 65535
    )
class IpAddress(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x00)
)
subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
4, 4
)
class Counter32(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x01)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 4294967295
)
class Gauge32(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 4294967295
)
class Unsigned32(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 4294967295
)
class TimeTicks(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x03)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 4294967295
)
class Opaque(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x04)
)
class Counter64(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x06)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 18446744073709551615
)
class Bits(univ.OctetString):
pass
class ObjectName(univ.ObjectIdentifier):
pass
class SimpleSyntax(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('integer-value', Integer()),
namedtype.NamedType('string-value', OctetString()),
namedtype.NamedType('objectID-value', univ.ObjectIdentifier())
)
class ApplicationSyntax(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('ipAddress-value', IpAddress()),
namedtype.NamedType('counter-value', Counter32()),
namedtype.NamedType('timeticks-value', TimeTicks()),
namedtype.NamedType('arbitrary-value', Opaque()),
namedtype.NamedType('big-counter-value', Counter64()),
# This conflicts with Counter32
# namedtype.NamedType('unsigned-integer-value', Unsigned32()),
namedtype.NamedType('gauge32-value', Gauge32())
) # BITS misplaced?
class ObjectSyntax(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('simple', SimpleSyntax()),
namedtype.NamedType('application-wide', ApplicationSyntax())
)
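# --- Usage sketch (editor's addition; not part of the upstream module) ---
# The application-tagged types above override the universal tags, so their
# BER encodings stay distinguishable inside ObjectSyntax. Counter32 carries
# APPLICATION 1; its first identifier octet is therefore 0x41.
def _demo_counter32():
    from pyasn1.codec.ber import decoder, encoder
    substrate = encoder.encode(Counter32(1234))
    assert substrate[0] == 0x41  # application class, primitive form, tag 1
    value, rest = decoder.decode(substrate, asn1Spec=Counter32())
    assert not rest and int(value) == 1234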
[file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc1902.py (source: pypi)]
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
MAX = float('inf')
def _OID(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
unformatted_postal_address = univ.Integer(16)
ub_organizational_units = univ.Integer(4)
ub_organizational_unit_name_length = univ.Integer(32)
class OrganizationalUnitName(char.PrintableString):
pass
OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
class OrganizationalUnitNames(univ.SequenceOf):
pass
OrganizationalUnitNames.componentType = OrganizationalUnitName()
OrganizationalUnitNames.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
class AttributeType(univ.ObjectIdentifier):
pass
id_at = _OID(2, 5, 4)
id_at_name = _OID(id_at, 41)
ub_pds_parameter_length = univ.Integer(30)
class PDSParameter(univ.Set):
pass
PDSParameter.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
)
class PhysicalDeliveryOrganizationName(PDSParameter):
pass
ub_organization_name_length = univ.Integer(64)
ub_domain_defined_attribute_type_length = univ.Integer(8)
ub_domain_defined_attribute_value_length = univ.Integer(128)
class TeletexDomainDefinedAttribute(univ.Sequence):
pass
TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
)
id_pkix = _OID(1, 3, 6, 1, 5, 5, 7)
id_qt = _OID(id_pkix, 2)
class PresentationAddress(univ.Sequence):
pass
PresentationAddress.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class AlgorithmIdentifier(univ.Sequence):
pass
AlgorithmIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('parameters', univ.Any())
)
class UniqueIdentifier(univ.BitString):
pass
class Extension(univ.Sequence):
pass
Extension.componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
namedtype.NamedType('extnValue', univ.OctetString())
)
class Extensions(univ.SequenceOf):
pass
Extensions.componentType = Extension()
Extensions.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class CertificateSerialNumber(univ.Integer):
pass
class SubjectPublicKeyInfo(univ.Sequence):
pass
SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', univ.BitString())
)
class Time(univ.Choice):
pass
Time.componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
class Validity(univ.Sequence):
pass
Validity.componentType = namedtype.NamedTypes(
namedtype.NamedType('notBefore', Time()),
namedtype.NamedType('notAfter', Time())
)
class Version(univ.Integer):
pass
Version.namedValues = namedval.NamedValues(
('v1', 0),
('v2', 1),
('v3', 2)
)
class AttributeValue(univ.Any):
pass
class AttributeTypeAndValue(univ.Sequence):
pass
AttributeTypeAndValue.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('value', AttributeValue())
)
class RelativeDistinguishedName(univ.SetOf):
pass
RelativeDistinguishedName.componentType = AttributeTypeAndValue()
RelativeDistinguishedName.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class RDNSequence(univ.SequenceOf):
pass
RDNSequence.componentType = RelativeDistinguishedName()
class Name(univ.Choice):
pass
Name.componentType = namedtype.NamedTypes(
namedtype.NamedType('rdnSequence', RDNSequence())
)
class TBSCertificate(univ.Sequence):
pass
TBSCertificate.componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version',
Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 0)).subtype(value="v1")),
namedtype.NamedType('serialNumber', CertificateSerialNumber()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('validity', Validity()),
namedtype.NamedType('subject', Name()),
namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('extensions',
Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class Certificate(univ.Sequence):
pass
Certificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificate', TBSCertificate()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
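# --- Usage sketch (editor's addition; not part of the upstream module) ---
# A DER-encoded X.509 certificate is exactly a Certificate value, so decoding
# one is a single call (`der_bytes` is a placeholder for the raw certificate
# bytes, e.g. a PEM body after base64-decoding).
def _demo_decode_certificate(der_bytes):
    from pyasn1.codec.der import decoder
    cert, rest = decoder.decode(der_bytes, asn1Spec=Certificate())
    assert not rest
    tbs = cert['tbsCertificate']
    return int(tbs['serialNumber']), tbs['subject']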
ub_surname_length = univ.Integer(40)
class TeletexOrganizationName(char.TeletexString):
pass
TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
ub_e163_4_sub_address_length = univ.Integer(40)
teletex_common_name = univ.Integer(2)
ub_country_name_alpha_length = univ.Integer(2)
ub_country_name_numeric_length = univ.Integer(3)
class CountryName(univ.Choice):
pass
CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
CountryName.componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
extension_OR_address_components = univ.Integer(12)
id_at_dnQualifier = _OID(id_at, 46)
ub_e163_4_number_length = univ.Integer(15)
class ExtendedNetworkAddress(univ.Choice):
pass
ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
namedtype.NamedType('e163-4-address', univ.Sequence(componentType=namedtype.NamedTypes(
namedtype.NamedType('number', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
))
),
namedtype.NamedType('psap-address', PresentationAddress().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
terminal_type = univ.Integer(23)
id_domainComponent = _OID(0, 9, 2342, 19200300, 100, 1, 25)
ub_state_name = univ.Integer(128)
class X520StateOrProvinceName(univ.Choice):
pass
X520StateOrProvinceName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
)
ub_organization_name = univ.Integer(64)
class X520OrganizationName(univ.Choice):
pass
X520OrganizationName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
)
ub_emailaddress_length = univ.Integer(128)
class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
pass
id_at_surname = _OID(id_at, 4)
ub_common_name_length = univ.Integer(64)
id_ad = _OID(id_pkix, 48)
ub_numeric_user_id_length = univ.Integer(32)
class NumericUserIdentifier(char.NumericString):
pass
NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
class OrganizationName(char.PrintableString):
pass
OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
ub_domain_name_length = univ.Integer(16)
class AdministrationDomainName(univ.Choice):
pass
AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
AdministrationDomainName.componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
)
class PrivateDomainName(univ.Choice):
pass
PrivateDomainName.componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
)
ub_generation_qualifier_length = univ.Integer(3)
ub_given_name_length = univ.Integer(16)
ub_initials_length = univ.Integer(5)
class PersonalName(univ.Set):
pass
PersonalName.componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
ub_terminal_id_length = univ.Integer(24)
class TerminalIdentifier(char.PrintableString):
pass
TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
ub_x121_address_length = univ.Integer(16)
class X121Address(char.NumericString):
pass
X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
class NetworkAddress(X121Address):
pass
class BuiltInStandardAttributes(univ.Sequence):
pass
BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('country-name', CountryName()),
namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
)
ub_domain_defined_attributes = univ.Integer(4)
class BuiltInDomainDefinedAttribute(univ.Sequence):
pass
BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
)
class BuiltInDomainDefinedAttributes(univ.SequenceOf):
pass
BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
BuiltInDomainDefinedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
ub_extension_attributes = univ.Integer(256)
class ExtensionAttribute(univ.Sequence):
pass
ExtensionAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('extension-attribute-value',
univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtensionAttributes(univ.SetOf):
pass
ExtensionAttributes.componentType = ExtensionAttribute()
ExtensionAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
class ORAddress(univ.Sequence):
pass
ORAddress.componentType = namedtype.NamedTypes(
namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
)
id_pe = _OID(id_pkix, 1)
ub_title = univ.Integer(64)
class X520Title(univ.Choice):
pass
X520Title.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
)
id_at_organizationalUnitName = _OID(id_at, 11)
class EmailAddress(char.IA5String):
pass
EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
physical_delivery_country_name = univ.Integer(8)
id_at_givenName = _OID(id_at, 42)
class TeletexCommonName(char.TeletexString):
pass
TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
id_qt_cps = _OID(id_qt, 1)
class LocalPostalAttributes(PDSParameter):
pass
class StreetAddress(PDSParameter):
pass
id_kp = _OID(id_pkix, 3)
class DirectoryString(univ.Choice):
pass
DirectoryString.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
class DomainComponent(char.IA5String):
pass
id_at_initials = _OID(id_at, 43)
id_qt_unotice = _OID(id_qt, 2)
ub_pds_name_length = univ.Integer(16)
class PDSName(char.PrintableString):
pass
PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
class PosteRestanteAddress(PDSParameter):
pass
class DistinguishedName(RDNSequence):
pass
class CommonName(char.PrintableString):
pass
CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
ub_serial_number = univ.Integer(64)
class X520SerialNumber(char.PrintableString):
pass
X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
id_at_generationQualifier = _OID(id_at, 44)
ub_organizational_unit_name = univ.Integer(64)
id_ad_ocsp = _OID(id_ad, 1)
class TeletexOrganizationalUnitName(char.TeletexString):
pass
TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
class TeletexPersonalName(univ.Set):
pass
TeletexPersonalName.componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class TeletexDomainDefinedAttributes(univ.SequenceOf):
pass
TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute()
TeletexDomainDefinedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
class TBSCertList(univ.Sequence):
pass
TBSCertList.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('version', Version()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('thisUpdate', Time()),
namedtype.OptionalNamedType('nextUpdate', Time()),
namedtype.OptionalNamedType('revokedCertificates',
univ.SequenceOf(componentType=univ.Sequence(componentType=namedtype.NamedTypes(
namedtype.NamedType('userCertificate', CertificateSerialNumber()),
namedtype.NamedType('revocationDate', Time()),
namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
))
)),
namedtype.OptionalNamedType('crlExtensions',
Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
local_postal_attributes = univ.Integer(21)
pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
class PhysicalDeliveryCountryName(univ.Choice):
pass
PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
ub_name = univ.Integer(32768)
class X520name(univ.Choice):
pass
X520name.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
)
id_emailAddress = _OID(pkcs_9, 1)
class TerminalType(univ.Integer):
pass
TerminalType.namedValues = namedval.NamedValues(
('telex', 3),
('teletex', 4),
('g3-facsimile', 5),
('g4-facsimile', 6),
('ia5-terminal', 7),
('videotex', 8)
)
class X520OrganizationalUnitName(univ.Choice):
pass
X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
)
id_at_commonName = _OID(id_at, 3)
pds_name = univ.Integer(7)
post_office_box_address = univ.Integer(18)
ub_locality_name = univ.Integer(128)
class X520LocalityName(univ.Choice):
pass
X520LocalityName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
)
id_ad_timeStamping = _OID(id_ad, 3)
id_at_countryName = _OID(id_at, 6)
physical_delivery_personal_name = univ.Integer(13)
teletex_personal_name = univ.Integer(4)
teletex_organizational_unit_names = univ.Integer(5)
class PhysicalDeliveryPersonalName(PDSParameter):
pass
ub_postal_code_length = univ.Integer(16)
class PostalCode(univ.Choice):
pass
PostalCode.componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
namedtype.NamedType('printable-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
)
class X520countryName(char.PrintableString):
pass
X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
postal_code = univ.Integer(9)
id_ad_caRepository = _OID(id_ad, 5)
extension_physical_delivery_address_components = univ.Integer(15)
class PostOfficeBoxAddress(PDSParameter):
pass
class PhysicalDeliveryOfficeName(PDSParameter):
pass
id_at_title = _OID(id_at, 12)
id_at_serialNumber = _OID(id_at, 5)
id_ad_caIssuers = _OID(id_ad, 2)
ub_integer_options = univ.Integer(256)
class CertificateList(univ.Sequence):
pass
CertificateList.componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertList', TBSCertList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class PhysicalDeliveryOfficeNumber(PDSParameter):
pass
class TeletexOrganizationalUnitNames(univ.SequenceOf):
pass
TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
TeletexOrganizationalUnitNames.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
physical_delivery_office_name = univ.Integer(10)
ub_common_name = univ.Integer(64)
class ExtensionORAddressComponents(PDSParameter):
pass
ub_pseudonym = univ.Integer(128)
poste_restante_address = univ.Integer(19)
id_at_organizationName = _OID(id_at, 10)
physical_delivery_office_number = univ.Integer(11)
id_at_pseudonym = _OID(id_at, 65)
class X520CommonName(univ.Choice):
pass
X520CommonName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
)
physical_delivery_organization_name = univ.Integer(14)
class X520dnQualifier(char.PrintableString):
pass
id_at_stateOrProvinceName = _OID(id_at, 8)
common_name = univ.Integer(1)
id_at_localityName = _OID(id_at, 7)
ub_match = univ.Integer(128)
ub_unformatted_address_length = univ.Integer(180)
class Attribute(univ.Sequence):
pass
Attribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
)
extended_network_address = univ.Integer(22)
unique_postal_name = univ.Integer(20)
ub_pds_physical_address_lines = univ.Integer(6)
class UnformattedPostalAddress(univ.Set):
pass
UnformattedPostalAddress.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
)
class UniquePostalName(PDSParameter):
pass
class X520Pseudonym(univ.Choice):
pass
X520Pseudonym.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
)
teletex_organization_name = univ.Integer(3)
teletex_domain_defined_attributes = univ.Integer(6)
street_address = univ.Integer(17)
id_kp_OCSPSigning = _OID(id_kp, 9)
id_ce = _OID(2, 5, 29)
id_ce_certificatePolicies = _OID(id_ce, 32)
class EDIPartyName(univ.Sequence):
pass
EDIPartyName.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('partyName',
DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class AnotherName(univ.Sequence):
pass
AnotherName.componentType = namedtype.NamedTypes(
namedtype.NamedType('type-id', univ.ObjectIdentifier()),
namedtype.NamedType('value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class GeneralName(univ.Choice):
pass
GeneralName.componentType = namedtype.NamedTypes(
namedtype.NamedType('otherName',
AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('rfc822Name',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('dNSName',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('x400Address',
ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.NamedType('directoryName',
Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.NamedType('ediPartyName',
EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.NamedType('uniformResourceIdentifier',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.NamedType('iPAddress',
univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
)
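# Illustrative sketch (not part of the upstream module): populating the
# GeneralName CHOICE. Assigning to a component by name selects that
# alternative; getName() reports which alternative is currently set.
def _example_dns_general_name(hostname='example.com'):
    gn = GeneralName()
    gn['dNSName'] = hostname
    assert gn.getName() == 'dNSName'
    return gn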
class GeneralNames(univ.SequenceOf):
pass
GeneralNames.componentType = GeneralName()
GeneralNames.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class IssuerAltName(GeneralNames):
pass
id_ce_cRLDistributionPoints = _OID(id_ce, 31)
class CertPolicyId(univ.ObjectIdentifier):
pass
class PolicyMappings(univ.SequenceOf):
pass
PolicyMappings.componentType = univ.Sequence(componentType=namedtype.NamedTypes(
namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
))
PolicyMappings.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class PolicyQualifierId(univ.ObjectIdentifier):
pass
holdInstruction = _OID(2, 2, 840, 10040, 2)
id_ce_subjectDirectoryAttributes = _OID(id_ce, 9)
id_holdinstruction_callissuer = _OID(holdInstruction, 2)
class SubjectDirectoryAttributes(univ.SequenceOf):
pass
SubjectDirectoryAttributes.componentType = Attribute()
SubjectDirectoryAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
anyPolicy = _OID(id_ce_certificatePolicies, 0)
id_ce_subjectAltName = _OID(id_ce, 17)
id_kp_emailProtection = _OID(id_kp, 4)
class ReasonFlags(univ.BitString):
pass
ReasonFlags.namedValues = namedval.NamedValues(
('unused', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('privilegeWithdrawn', 7),
('aACompromise', 8)
)
class DistributionPointName(univ.Choice):
pass
DistributionPointName.componentType = namedtype.NamedTypes(
namedtype.NamedType('fullName',
GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class DistributionPoint(univ.Sequence):
pass
DistributionPoint.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
id_ce_keyUsage = _OID(id_ce, 15)
class PolicyQualifierInfo(univ.Sequence):
pass
PolicyQualifierInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
namedtype.NamedType('qualifier', univ.Any())
)
class PolicyInformation(univ.Sequence):
pass
PolicyInformation.componentType = namedtype.NamedTypes(
namedtype.NamedType('policyIdentifier', CertPolicyId()),
namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
)
class CertificatePolicies(univ.SequenceOf):
pass
CertificatePolicies.componentType = PolicyInformation()
CertificatePolicies.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_ce_basicConstraints = _OID(id_ce, 19)
class HoldInstructionCode(univ.ObjectIdentifier):
pass
class KeyPurposeId(univ.ObjectIdentifier):
pass
class ExtKeyUsageSyntax(univ.SequenceOf):
pass
ExtKeyUsageSyntax.componentType = KeyPurposeId()
ExtKeyUsageSyntax.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class SubjectAltName(GeneralNames):
pass
class BasicConstraints(univ.Sequence):
pass
BasicConstraints.componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
namedtype.OptionalNamedType('pathLenConstraint',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
)
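# Illustrative sketch (not part of the upstream module): basicConstraints for a
# CA certificate. 'cA' is a DefaultedNamedType with default FALSE, so a CA must
# set it to TRUE explicitly; pathLenConstraint stays optional.
def _example_ca_basic_constraints(path_len=0):
    bc = BasicConstraints()
    bc['cA'] = True
    bc['pathLenConstraint'] = path_len
    return bc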
class SkipCerts(univ.Integer):
pass
SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
class InhibitAnyPolicy(SkipCerts):
pass
class CRLNumber(univ.Integer):
pass
CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
class BaseCRLNumber(CRLNumber):
pass
class KeyIdentifier(univ.OctetString):
pass
class AuthorityKeyIdentifier(univ.Sequence):
pass
AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
id_ce_nameConstraints = _OID(id_ce, 30)
id_kp_serverAuth = _OID(id_kp, 1)
id_ce_freshestCRL = _OID(id_ce, 46)
id_ce_cRLReasons = _OID(id_ce, 21)
class CRLDistributionPoints(univ.SequenceOf):
pass
CRLDistributionPoints.componentType = DistributionPoint()
CRLDistributionPoints.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class FreshestCRL(CRLDistributionPoints):
pass
id_ce_inhibitAnyPolicy = _OID(id_ce, 54)
class CRLReason(univ.Enumerated):
pass
CRLReason.namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
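# Illustrative sketch (not part of the upstream module): Enumerated types with
# namedValues accept their symbolic names on construction, so the reason codes
# above can be used directly.
def _example_crl_reason():
    reason = CRLReason('keyCompromise')
    assert int(reason) == 1
    return reason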
class BaseDistance(univ.Integer):
pass
BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
class GeneralSubtree(univ.Sequence):
pass
GeneralSubtree.componentType = namedtype.NamedTypes(
namedtype.NamedType('base', GeneralName()),
namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class GeneralSubtrees(univ.SequenceOf):
pass
GeneralSubtrees.componentType = GeneralSubtree()
GeneralSubtrees.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class NameConstraints(univ.Sequence):
pass
NameConstraints.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_pe_authorityInfoAccess = _OID(id_pe, 1)
id_pe_subjectInfoAccess = _OID(id_pe, 11)
id_ce_certificateIssuer = _OID(id_ce, 29)
id_ce_invalidityDate = _OID(id_ce, 24)
# Note: this second definition shadows the DirectoryString Choice declared
# earlier in this module.
class DirectoryString(univ.Choice):
    pass
DirectoryString.componentType = namedtype.NamedTypes(
namedtype.NamedType('any', univ.Any())
)
id_ce_authorityKeyIdentifier = _OID(id_ce, 35)
class AccessDescription(univ.Sequence):
pass
AccessDescription.componentType = namedtype.NamedTypes(
namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
namedtype.NamedType('accessLocation', GeneralName())
)
class AuthorityInfoAccessSyntax(univ.SequenceOf):
pass
AuthorityInfoAccessSyntax.componentType = AccessDescription()
AuthorityInfoAccessSyntax.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_ce_issuingDistributionPoint = _OID(id_ce, 28)
class CPSuri(char.IA5String):
pass
class DisplayText(univ.Choice):
pass
DisplayText.componentType = namedtype.NamedTypes(
namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('visibleString',
char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
)
class NoticeReference(univ.Sequence):
pass
NoticeReference.componentType = namedtype.NamedTypes(
namedtype.NamedType('organization', DisplayText()),
namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
)
class UserNotice(univ.Sequence):
pass
UserNotice.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('noticeRef', NoticeReference()),
namedtype.OptionalNamedType('explicitText', DisplayText())
)
class PrivateKeyUsagePeriod(univ.Sequence):
pass
PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_ce_subjectKeyIdentifier = _OID(id_ce, 14)
class CertificateIssuer(GeneralNames):
pass
class InvalidityDate(useful.GeneralizedTime):
pass
class SubjectInfoAccessSyntax(univ.SequenceOf):
pass
SubjectInfoAccessSyntax.componentType = AccessDescription()
SubjectInfoAccessSyntax.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class KeyUsage(univ.BitString):
pass
KeyUsage.namedValues = namedval.NamedValues(
('digitalSignature', 0),
('nonRepudiation', 1),
('keyEncipherment', 2),
('dataEncipherment', 3),
('keyAgreement', 4),
('keyCertSign', 5),
('cRLSign', 6),
('encipherOnly', 7),
('decipherOnly', 8)
)
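# Illustrative sketch (not part of the upstream module): recent pyasn1 releases
# let a BIT STRING with namedValues be built from a comma-separated list of the
# bit names declared above.
def _example_key_usage():
    return KeyUsage('digitalSignature, keyEncipherment')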
id_ce_extKeyUsage = _OID(id_ce, 37)
anyExtendedKeyUsage = _OID(id_ce_extKeyUsage, 0)
id_ce_privateKeyUsagePeriod = _OID(id_ce, 16)
id_ce_policyMappings = _OID(id_ce, 33)
id_ce_cRLNumber = _OID(id_ce, 20)
id_ce_policyConstraints = _OID(id_ce, 36)
id_holdinstruction_none = _OID(holdInstruction, 1)
id_holdinstruction_reject = _OID(holdInstruction, 3)
id_kp_timeStamping = _OID(id_kp, 8)
class PolicyConstraints(univ.Sequence):
pass
PolicyConstraints.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('requireExplicitPolicy',
SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('inhibitPolicyMapping',
SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class SubjectKeyIdentifier(KeyIdentifier):
pass
id_kp_clientAuth = _OID(id_kp, 2)
id_ce_deltaCRLIndicator = _OID(id_ce, 27)
id_ce_issuerAltName = _OID(id_ce, 18)
id_kp_codeSigning = _OID(id_kp, 3)
id_ce_holdInstructionCode = _OID(id_ce, 23)
class IssuingDistributionPoint(univ.Sequence):
pass
IssuingDistributionPoint.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
)
# ----------------------------------------------------------------------------
# End of rfc3280.py
# Path: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc3280.py
# Source: pypi
# ----------------------------------------------------------------------------
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import opentype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
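# Illustrative sketch (not part of the upstream module): _buildOid composes an
# OBJECT IDENTIFIER from prior OIDs and plain integer arcs, so the definitions
# below can extend one another.
def _example_build_oid():
    base = univ.ObjectIdentifier('1.3.6.1.5.5.7')
    oid = _buildOid(base, 48)
    assert str(oid) == '1.3.6.1.5.5.7.48'
    return oid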
ub_e163_4_sub_address_length = univ.Integer(40)
ub_e163_4_number_length = univ.Integer(15)
unformatted_postal_address = univ.Integer(16)
class TerminalType(univ.Integer):
pass
TerminalType.namedValues = namedval.NamedValues(
('telex', 3),
('teletex', 4),
('g3-facsimile', 5),
('g4-facsimile', 6),
('ia5-terminal', 7),
('videotex', 8)
)
class Extension(univ.Sequence):
pass
Extension.componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
namedtype.NamedType('extnValue', univ.OctetString())
)
class Extensions(univ.SequenceOf):
pass
Extensions.componentType = Extension()
Extensions.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
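# Illustrative sketch (not part of the upstream module): assembling a single
# Extension. Per X.509, extnValue is an OCTET STRING wrapping the DER encoding
# of the extension's inner value; `inner_der` is assumed to hold that encoding.
def _example_extension(oid, inner_der, critical=False):
    ext = Extension()
    ext['extnID'] = univ.ObjectIdentifier(oid)
    ext['critical'] = critical
    ext['extnValue'] = univ.OctetString(inner_der)
    return ext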
physical_delivery_personal_name = univ.Integer(13)
ub_unformatted_address_length = univ.Integer(180)
ub_pds_parameter_length = univ.Integer(30)
ub_pds_physical_address_lines = univ.Integer(6)
class UnformattedPostalAddress(univ.Set):
pass
UnformattedPostalAddress.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
)
ub_organization_name = univ.Integer(64)
class X520OrganizationName(univ.Choice):
pass
X520OrganizationName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
)
ub_x121_address_length = univ.Integer(16)
pds_name = univ.Integer(7)
id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
id_kp = _buildOid(id_pkix, 3)
ub_postal_code_length = univ.Integer(16)
class PostalCode(univ.Choice):
pass
PostalCode.componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
namedtype.NamedType('printable-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
)
ub_generation_qualifier_length = univ.Integer(3)
unique_postal_name = univ.Integer(20)
class DomainComponent(char.IA5String):
pass
ub_domain_defined_attribute_value_length = univ.Integer(128)
ub_match = univ.Integer(128)
id_at = _buildOid(2, 5, 4)
class AttributeType(univ.ObjectIdentifier):
pass
id_at_organizationalUnitName = _buildOid(id_at, 11)
terminal_type = univ.Integer(23)
class PDSParameter(univ.Set):
pass
PDSParameter.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
)
class PhysicalDeliveryPersonalName(PDSParameter):
pass
ub_surname_length = univ.Integer(40)
id_ad = _buildOid(id_pkix, 48)
ub_domain_defined_attribute_type_length = univ.Integer(8)
class TeletexDomainDefinedAttribute(univ.Sequence):
pass
TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
)
ub_domain_defined_attributes = univ.Integer(4)
class TeletexDomainDefinedAttributes(univ.SequenceOf):
pass
TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute()
TeletexDomainDefinedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
extended_network_address = univ.Integer(22)
ub_locality_name = univ.Integer(128)
class X520LocalityName(univ.Choice):
pass
X520LocalityName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
)
teletex_organization_name = univ.Integer(3)
ub_given_name_length = univ.Integer(16)
ub_initials_length = univ.Integer(5)
class PersonalName(univ.Set):
pass
PersonalName.componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
ub_organizational_unit_name_length = univ.Integer(32)
class OrganizationalUnitName(char.PrintableString):
pass
OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
id_at_generationQualifier = _buildOid(id_at, 44)
class Version(univ.Integer):
pass
Version.namedValues = namedval.NamedValues(
('v1', 0),
('v2', 1),
('v3', 2)
)
class CertificateSerialNumber(univ.Integer):
pass
class AlgorithmIdentifier(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('parameters', univ.Any())
)
class Time(univ.Choice):
pass
Time.componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
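# Illustrative sketch (not part of the upstream module): Time is a CHOICE of
# UTCTime and GeneralizedTime; RFC 3280 mandates UTCTime for dates through the
# year 2049 and GeneralizedTime for 2050 and later.
def _example_utc_time(stamp='230101000000Z'):
    t = Time()
    t['utcTime'] = useful.UTCTime(stamp)
    return t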
class AttributeValue(univ.Any):
pass
certificateAttributesMap = {}
class AttributeTypeAndValue(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType(
'value', AttributeValue(),
openType=opentype.OpenType('type', certificateAttributesMap)
)
)
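# Illustrative sketch (not part of the upstream module): the OpenType binding
# above lets the decoder resolve 'value' through certificateAttributesMap once
# that map is populated; recent pyasn1 decoders take decodeOpenTypes=True for
# this. With the map still empty, values are simply left as ANY.
def _example_decode_atv(der_bytes):
    from pyasn1.codec.der import decoder
    atv, rest = decoder.decode(
        der_bytes, asn1Spec=AttributeTypeAndValue(), decodeOpenTypes=True)
    assert not rest
    return atv['type'], atv['value']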
class RelativeDistinguishedName(univ.SetOf):
pass
RelativeDistinguishedName.componentType = AttributeTypeAndValue()
RelativeDistinguishedName.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class RDNSequence(univ.SequenceOf):
pass
RDNSequence.componentType = RelativeDistinguishedName()
class Name(univ.Choice):
pass
Name.componentType = namedtype.NamedTypes(
namedtype.NamedType('rdnSequence', RDNSequence())
)
class TBSCertList(univ.Sequence):
pass
TBSCertList.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('version', Version()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('thisUpdate', Time()),
namedtype.OptionalNamedType('nextUpdate', Time()),
namedtype.OptionalNamedType(
'revokedCertificates', univ.SequenceOf(
componentType=univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('userCertificate', CertificateSerialNumber()),
namedtype.NamedType('revocationDate', Time()),
namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
)
)
)
),
namedtype.OptionalNamedType(
'crlExtensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class CertificateList(univ.Sequence):
pass
CertificateList.componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertList', TBSCertList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class PhysicalDeliveryOfficeName(PDSParameter):
pass
ub_extension_attributes = univ.Integer(256)
certificateExtensionsMap = {
}
class ExtensionAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'extension-attribute-type',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType(
'extension-attribute-value',
univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)),
openType=opentype.OpenType('type', certificateExtensionsMap))
)
id_qt = _buildOid(id_pkix, 2)
id_qt_cps = _buildOid(id_qt, 1)
id_at_stateOrProvinceName = _buildOid(id_at, 8)
id_at_title = _buildOid(id_at, 12)
id_at_serialNumber = _buildOid(id_at, 5)
class X520dnQualifier(char.PrintableString):
pass
class PosteRestanteAddress(PDSParameter):
pass
poste_restante_address = univ.Integer(19)
class UniqueIdentifier(univ.BitString):
pass
class Validity(univ.Sequence):
pass
Validity.componentType = namedtype.NamedTypes(
namedtype.NamedType('notBefore', Time()),
namedtype.NamedType('notAfter', Time())
)
class SubjectPublicKeyInfo(univ.Sequence):
pass
SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', univ.BitString())
)
class TBSCertificate(univ.Sequence):
pass
TBSCertificate.componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version',
Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 0)).subtype(value="v1")),
namedtype.NamedType('serialNumber', CertificateSerialNumber()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('validity', Validity()),
namedtype.NamedType('subject', Name()),
namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('extensions',
Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
physical_delivery_office_name = univ.Integer(10)
ub_name = univ.Integer(32768)
class X520name(univ.Choice):
pass
X520name.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
)
id_at_dnQualifier = _buildOid(id_at, 46)
ub_serial_number = univ.Integer(64)
ub_pseudonym = univ.Integer(128)
pkcs_9 = _buildOid(1, 2, 840, 113549, 1, 9)
class X121Address(char.NumericString):
pass
X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
class NetworkAddress(X121Address):
pass
ub_integer_options = univ.Integer(256)
id_at_commonName = _buildOid(id_at, 3)
ub_organization_name_length = univ.Integer(64)
id_ad_ocsp = _buildOid(id_ad, 1)
ub_country_name_numeric_length = univ.Integer(3)
ub_country_name_alpha_length = univ.Integer(2)
class PhysicalDeliveryCountryName(univ.Choice):
pass
PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
id_emailAddress = _buildOid(pkcs_9, 1)
common_name = univ.Integer(1)
class X520Pseudonym(univ.Choice):
pass
X520Pseudonym.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
)
ub_domain_name_length = univ.Integer(16)
class AdministrationDomainName(univ.Choice):
pass
AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
AdministrationDomainName.componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
)
class PresentationAddress(univ.Sequence):
pass
PresentationAddress.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class ExtendedNetworkAddress(univ.Choice):
pass
ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
namedtype.NamedType(
'e163-4-address', univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('number', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
)
),
namedtype.NamedType('psap-address', PresentationAddress().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class TeletexOrganizationName(char.TeletexString):
pass
TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
ub_terminal_id_length = univ.Integer(24)
class TerminalIdentifier(char.PrintableString):
pass
TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
id_ad_caIssuers = _buildOid(id_ad, 2)
id_at_countryName = _buildOid(id_at, 6)
class StreetAddress(PDSParameter):
pass
postal_code = univ.Integer(9)
id_at_givenName = _buildOid(id_at, 42)
ub_title = univ.Integer(64)
class ExtensionAttributes(univ.SetOf):
pass
ExtensionAttributes.componentType = ExtensionAttribute()
ExtensionAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
ub_emailaddress_length = univ.Integer(255)
id_ad_caRepository = _buildOid(id_ad, 5)
class ExtensionORAddressComponents(PDSParameter):
pass
ub_organizational_unit_name = univ.Integer(64)
class X520OrganizationalUnitName(univ.Choice):
pass
X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
)
class LocalPostalAttributes(PDSParameter):
pass
teletex_organizational_unit_names = univ.Integer(5)
class X520Title(univ.Choice):
pass
X520Title.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
)
id_at_localityName = _buildOid(id_at, 7)
id_at_initials = _buildOid(id_at, 43)
ub_state_name = univ.Integer(128)
class X520StateOrProvinceName(univ.Choice):
pass
X520StateOrProvinceName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
)
physical_delivery_organization_name = univ.Integer(14)
id_at_surname = _buildOid(id_at, 4)
class X520countryName(char.PrintableString):
pass
X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
physical_delivery_office_number = univ.Integer(11)
id_qt_unotice = _buildOid(id_qt, 2)
class X520SerialNumber(char.PrintableString):
pass
X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
class Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('values',
univ.SetOf(componentType=AttributeValue()),
openType=opentype.OpenType('type', certificateAttributesMap))
)
ub_common_name = univ.Integer(64)
id_pe = _buildOid(id_pkix, 1)
class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
pass
class EmailAddress(char.IA5String):
pass
EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
id_at_organizationName = _buildOid(id_at, 10)
post_office_box_address = univ.Integer(18)
class BuiltInDomainDefinedAttribute(univ.Sequence):
pass
BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
)
class BuiltInDomainDefinedAttributes(univ.SequenceOf):
pass
BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
BuiltInDomainDefinedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
id_at_pseudonym = _buildOid(id_at, 65)
id_domainComponent = _buildOid(0, 9, 2342, 19200300, 100, 1, 25)
class X520CommonName(univ.Choice):
pass
X520CommonName.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
)
extension_OR_address_components = univ.Integer(12)
ub_organizational_units = univ.Integer(4)
teletex_personal_name = univ.Integer(4)
ub_numeric_user_id_length = univ.Integer(32)
ub_common_name_length = univ.Integer(64)
class TeletexCommonName(char.TeletexString):
pass
TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
class PhysicalDeliveryOrganizationName(PDSParameter):
pass
extension_physical_delivery_address_components = univ.Integer(15)
class NumericUserIdentifier(char.NumericString):
pass
NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
class CountryName(univ.Choice):
pass
CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
CountryName.componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
class OrganizationName(char.PrintableString):
pass
OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
class OrganizationalUnitNames(univ.SequenceOf):
pass
OrganizationalUnitNames.componentType = OrganizationalUnitName()
OrganizationalUnitNames.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
class PrivateDomainName(univ.Choice):
pass
PrivateDomainName.componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
)
class BuiltInStandardAttributes(univ.Sequence):
pass
BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('country-name', CountryName()),
namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
)
class ORAddress(univ.Sequence):
pass
ORAddress.componentType = namedtype.NamedTypes(
namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
)
class DistinguishedName(RDNSequence):
pass
id_ad_timeStamping = _buildOid(id_ad, 3)
class PhysicalDeliveryOfficeNumber(PDSParameter):
pass
teletex_domain_defined_attributes = univ.Integer(6)
class UniquePostalName(PDSParameter):
pass
physical_delivery_country_name = univ.Integer(8)
ub_pds_name_length = univ.Integer(16)
class PDSName(char.PrintableString):
pass
PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
class TeletexPersonalName(univ.Set):
pass
TeletexPersonalName.componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
street_address = univ.Integer(17)
class PostOfficeBoxAddress(PDSParameter):
pass
local_postal_attributes = univ.Integer(21)
class DirectoryString(univ.Choice):
pass
DirectoryString.componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
teletex_common_name = univ.Integer(2)
class CommonName(char.PrintableString):
pass
CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
class Certificate(univ.Sequence):
pass
Certificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificate', TBSCertificate()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
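# Illustrative usage sketch, not part of the original module: decoding a
# DER-encoded certificate against the Certificate spec above. The input
# bytes are assumed to be supplied by the caller (e.g. read from a .der
# file); pyasn1's DER decoder returns the parsed object plus any trailing
# substrate.
def _example_decode_certificate(der_bytes):
    from pyasn1.codec.der import decoder
    cert, rest = decoder.decode(der_bytes, asn1Spec=Certificate())
    assert not rest, 'unexpected trailing bytes after certificate'
    # tbsCertificate carries issuer, subject, validity and extensions.
    return cert['tbsCertificate']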
class TeletexOrganizationalUnitName(char.TeletexString):
pass
TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
id_at_name = _buildOid(id_at, 41)
class TeletexOrganizationalUnitNames(univ.SequenceOf):
pass
TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
TeletexOrganizationalUnitNames.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
id_ce = _buildOid(2, 5, 29)
id_ce_issuerAltName = _buildOid(id_ce, 18)
class SkipCerts(univ.Integer):
pass
SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
class CRLReason(univ.Enumerated):
pass
CRLReason.namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
class PrivateKeyUsagePeriod(univ.Sequence):
pass
PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
anotherNameMap = {
}
class AnotherName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type-id', univ.ObjectIdentifier()),
namedtype.NamedType(
'value',
univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
openType=opentype.OpenType('type-id', anotherNameMap)
)
)
class EDIPartyName(univ.Sequence):
pass
EDIPartyName.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('partyName', DirectoryString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class GeneralName(univ.Choice):
pass
GeneralName.componentType = namedtype.NamedTypes(
namedtype.NamedType('otherName',
AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('rfc822Name',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('dNSName',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('x400Address',
ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.NamedType('directoryName',
Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.NamedType('ediPartyName',
EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.NamedType('uniformResourceIdentifier',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.NamedType('iPAddress',
univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
)
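# Illustrative usage sketch, not part of the original module: selecting one
# alternative of the GeneralName CHOICE. Assigning to a named component
# picks that alternative and coerces the plain string into the tagged
# IA5String defined above.
def _example_dns_general_name(hostname='example.org'):
    general_name = GeneralName()
    general_name['dNSName'] = hostname
    return general_name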
class BaseDistance(univ.Integer):
pass
BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
class GeneralSubtree(univ.Sequence):
pass
GeneralSubtree.componentType = namedtype.NamedTypes(
namedtype.NamedType('base', GeneralName()),
namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class GeneralNames(univ.SequenceOf):
pass
GeneralNames.componentType = GeneralName()
GeneralNames.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class DistributionPointName(univ.Choice):
pass
DistributionPointName.componentType = namedtype.NamedTypes(
namedtype.NamedType('fullName',
GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ReasonFlags(univ.BitString):
pass
ReasonFlags.namedValues = namedval.NamedValues(
('unused', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('privilegeWithdrawn', 7),
('aACompromise', 8)
)
class IssuingDistributionPoint(univ.Sequence):
pass
IssuingDistributionPoint.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
)
id_ce_certificatePolicies = _buildOid(id_ce, 32)
id_kp_emailProtection = _buildOid(id_kp, 4)
class AccessDescription(univ.Sequence):
pass
AccessDescription.componentType = namedtype.NamedTypes(
namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
namedtype.NamedType('accessLocation', GeneralName())
)
class IssuerAltName(GeneralNames):
pass
id_ce_cRLDistributionPoints = _buildOid(id_ce, 31)
holdInstruction = _buildOid(2, 2, 840, 10040, 2)
id_holdinstruction_callissuer = _buildOid(holdInstruction, 2)
id_ce_subjectDirectoryAttributes = _buildOid(id_ce, 9)
id_ce_issuingDistributionPoint = _buildOid(id_ce, 28)
class DistributionPoint(univ.Sequence):
pass
DistributionPoint.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class CRLDistributionPoints(univ.SequenceOf):
pass
CRLDistributionPoints.componentType = DistributionPoint()
CRLDistributionPoints.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class GeneralSubtrees(univ.SequenceOf):
pass
GeneralSubtrees.componentType = GeneralSubtree()
GeneralSubtrees.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class NameConstraints(univ.Sequence):
pass
NameConstraints.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class SubjectDirectoryAttributes(univ.SequenceOf):
pass
SubjectDirectoryAttributes.componentType = Attribute()
SubjectDirectoryAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_kp_OCSPSigning = _buildOid(id_kp, 9)
id_kp_timeStamping = _buildOid(id_kp, 8)
class DisplayText(univ.Choice):
pass
DisplayText.componentType = namedtype.NamedTypes(
namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('visibleString',
char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
)
class NoticeReference(univ.Sequence):
pass
NoticeReference.componentType = namedtype.NamedTypes(
namedtype.NamedType('organization', DisplayText()),
namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
)
class UserNotice(univ.Sequence):
pass
UserNotice.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('noticeRef', NoticeReference()),
namedtype.OptionalNamedType('explicitText', DisplayText())
)
class PolicyQualifierId(univ.ObjectIdentifier):
pass
policyQualifierInfoMap = {
}
class PolicyQualifierInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
namedtype.NamedType(
'qualifier', univ.Any(),
openType=opentype.OpenType('policyQualifierId', policyQualifierInfoMap)
)
)
class CertPolicyId(univ.ObjectIdentifier):
pass
class PolicyInformation(univ.Sequence):
pass
PolicyInformation.componentType = namedtype.NamedTypes(
namedtype.NamedType('policyIdentifier', CertPolicyId()),
namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
)
class CertificatePolicies(univ.SequenceOf):
pass
CertificatePolicies.componentType = PolicyInformation()
CertificatePolicies.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class SubjectAltName(GeneralNames):
pass
id_ce_basicConstraints = _buildOid(id_ce, 19)
id_ce_authorityKeyIdentifier = _buildOid(id_ce, 35)
id_kp_codeSigning = _buildOid(id_kp, 3)
class BasicConstraints(univ.Sequence):
pass
BasicConstraints.componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
namedtype.OptionalNamedType('pathLenConstraint',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
)
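# Illustrative usage sketch, not part of the original module: building a CA
# BasicConstraints value with a path length of zero and DER-encoding it.
# Because cA differs from its DEFAULT of FALSE, DER emits it explicitly.
def _example_ca_basic_constraints():
    from pyasn1.codec.der import encoder
    basic_constraints = BasicConstraints()
    basic_constraints['cA'] = True
    basic_constraints['pathLenConstraint'] = 0
    return encoder.encode(basic_constraints)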
id_ce_certificateIssuer = _buildOid(id_ce, 29)
class PolicyMappings(univ.SequenceOf):
pass
PolicyMappings.componentType = univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
)
)
PolicyMappings.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class InhibitAnyPolicy(SkipCerts):
pass
anyPolicy = _buildOid(id_ce_certificatePolicies, 0)
class CRLNumber(univ.Integer):
pass
CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
class BaseCRLNumber(CRLNumber):
pass
id_ce_nameConstraints = _buildOid(id_ce, 30)
id_kp_serverAuth = _buildOid(id_kp, 1)
id_ce_freshestCRL = _buildOid(id_ce, 46)
id_ce_cRLReasons = _buildOid(id_ce, 21)
id_ce_extKeyUsage = _buildOid(id_ce, 37)
class KeyIdentifier(univ.OctetString):
pass
class AuthorityKeyIdentifier(univ.Sequence):
pass
AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class FreshestCRL(CRLDistributionPoints):
pass
id_ce_policyConstraints = _buildOid(id_ce, 36)
id_pe_authorityInfoAccess = _buildOid(id_pe, 1)
class AuthorityInfoAccessSyntax(univ.SequenceOf):
pass
AuthorityInfoAccessSyntax.componentType = AccessDescription()
AuthorityInfoAccessSyntax.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_holdinstruction_none = _buildOid(holdInstruction, 1)
class CPSuri(char.IA5String):
pass
id_pe_subjectInfoAccess = _buildOid(id_pe, 11)
class SubjectKeyIdentifier(KeyIdentifier):
pass
id_ce_subjectAltName = _buildOid(id_ce, 17)
class KeyPurposeId(univ.ObjectIdentifier):
pass
class ExtKeyUsageSyntax(univ.SequenceOf):
pass
ExtKeyUsageSyntax.componentType = KeyPurposeId()
ExtKeyUsageSyntax.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class HoldInstructionCode(univ.ObjectIdentifier):
pass
id_ce_deltaCRLIndicator = _buildOid(id_ce, 27)
id_ce_keyUsage = _buildOid(id_ce, 15)
id_ce_holdInstructionCode = _buildOid(id_ce, 23)
class SubjectInfoAccessSyntax(univ.SequenceOf):
pass
SubjectInfoAccessSyntax.componentType = AccessDescription()
SubjectInfoAccessSyntax.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class InvalidityDate(useful.GeneralizedTime):
pass
class KeyUsage(univ.BitString):
pass
KeyUsage.namedValues = namedval.NamedValues(
('digitalSignature', 0),
('nonRepudiation', 1),
('keyEncipherment', 2),
('dataEncipherment', 3),
('keyAgreement', 4),
('keyCertSign', 5),
('cRLSign', 6),
('encipherOnly', 7),
('decipherOnly', 8)
)
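# Illustrative usage sketch, not part of the original module: pyasn1
# BIT STRING types carrying namedValues accept a comma-separated list of
# bit names, so a typical end-entity key usage can be spelled out directly.
def _example_signing_key_usage():
    return KeyUsage('digitalSignature, keyEncipherment')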
id_ce_invalidityDate = _buildOid(id_ce, 24)
id_ce_policyMappings = _buildOid(id_ce, 33)
anyExtendedKeyUsage = _buildOid(id_ce_extKeyUsage, 0)
id_ce_privateKeyUsagePeriod = _buildOid(id_ce, 16)
id_ce_cRLNumber = _buildOid(id_ce, 20)
class CertificateIssuer(GeneralNames):
pass
id_holdinstruction_reject = _buildOid(holdInstruction, 3)
class PolicyConstraints(univ.Sequence):
pass
PolicyConstraints.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('requireExplicitPolicy',
SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('inhibitPolicyMapping',
SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_kp_clientAuth = _buildOid(id_kp, 2)
id_ce_subjectKeyIdentifier = _buildOid(id_ce, 14)
id_ce_inhibitAnyPolicy = _buildOid(id_ce, 54)
# map of AttributeType -> AttributeValue
_certificateAttributesMapUpdate = {
id_at_name: X520name(),
id_at_surname: X520name(),
id_at_givenName: X520name(),
id_at_initials: X520name(),
id_at_generationQualifier: X520name(),
id_at_commonName: X520CommonName(),
id_at_localityName: X520LocalityName(),
id_at_stateOrProvinceName: X520StateOrProvinceName(),
id_at_organizationName: X520OrganizationName(),
id_at_organizationalUnitName: X520OrganizationalUnitName(),
id_at_title: X520Title(),
id_at_dnQualifier: X520dnQualifier(),
id_at_countryName: X520countryName(),
id_at_serialNumber: X520SerialNumber(),
id_at_pseudonym: X520Pseudonym(),
id_domainComponent: DomainComponent(),
id_emailAddress: EmailAddress(),
}
certificateAttributesMap.update(_certificateAttributesMapUpdate)
# map of Certificate Extension OIDs to Extensions
_certificateExtensionsMap = {
id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
id_ce_keyUsage: KeyUsage(),
id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
id_ce_certificatePolicies: PolicyInformation(), # could be a sequence of concat'ed objects?
id_ce_policyMappings: PolicyMappings(),
id_ce_subjectAltName: SubjectAltName(),
id_ce_issuerAltName: IssuerAltName(),
id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
id_ce_basicConstraints: BasicConstraints(),
id_ce_nameConstraints: NameConstraints(),
id_ce_policyConstraints: PolicyConstraints(),
id_ce_extKeyUsage: ExtKeyUsageSyntax(),
id_ce_cRLDistributionPoints: CRLDistributionPoints(),
id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
id_ce_cRLNumber: univ.Integer(),
id_ce_deltaCRLIndicator: BaseCRLNumber(),
id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
id_ce_cRLReasons: CRLReason(),
id_ce_holdInstructionCode: univ.ObjectIdentifier(),
id_ce_invalidityDate: useful.GeneralizedTime(),
id_ce_certificateIssuer: GeneralNames(),
}
certificateExtensionsMap.update(_certificateExtensionsMap)
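# Illustrative usage sketch, not part of the original module: the map above
# lets a caller pick the decoder spec for a recognized certificate
# extension. extn_value_octets is assumed to be the DER content of the
# extension's extnValue OCTET STRING.
def _example_decode_extension(extn_id, extn_value_octets):
    from pyasn1.codec.der import decoder
    spec = certificateExtensionsMap.get(extn_id)
    if spec is None:
        return None  # unrecognized extension: leave the value opaque
    value, rest = decoder.decode(extn_value_octets, asn1Spec=spec)
    assert not rest, 'unexpected trailing bytes in extension value'
    return value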
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc5280.py
| 0.569494 | 0.297436 |
rfc5280.py
|
pypi
|
from pyasn1_modules.rfc2459 import *
class Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
)
class AttributeValueAssertion(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('attributeType', AttributeType()),
namedtype.NamedType('attributeValue', AttributeValue(),
openType=opentype.OpenType('type', certificateAttributesMap))
)
pkcs_7 = univ.ObjectIdentifier('1.2.840.113549.1.7')
data = univ.ObjectIdentifier('1.2.840.113549.1.7.1')
signedData = univ.ObjectIdentifier('1.2.840.113549.1.7.2')
envelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.3')
signedAndEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.4')
digestedData = univ.ObjectIdentifier('1.2.840.113549.1.7.5')
encryptedData = univ.ObjectIdentifier('1.2.840.113549.1.7.6')
class ContentType(univ.ObjectIdentifier):
pass
class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
pass
class EncryptedContent(univ.OctetString):
pass
contentTypeMap = {}
class EncryptedContentInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
namedtype.OptionalNamedType(
'encryptedContent', EncryptedContent().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
),
openType=opentype.OpenType('contentType', contentTypeMap)
)
)
class Version(univ.Integer): # overrides x509.Version
pass
class EncryptedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
)
class DigestAlgorithmIdentifier(AlgorithmIdentifier):
pass
class DigestAlgorithmIdentifiers(univ.SetOf):
componentType = DigestAlgorithmIdentifier()
class Digest(univ.OctetString):
pass
class ContentInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.OptionalNamedType(
'content',
univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)),
openType=opentype.OpenType('contentType', contentTypeMap)
)
)
class DigestedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.NamedType('contentInfo', ContentInfo()),
namedtype.NamedType('digest', Digest())
)
class IssuerAndSerialNumber(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('serialNumber', CertificateSerialNumber())
)
class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
pass
class EncryptedKey(univ.OctetString):
pass
class RecipientInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class RecipientInfos(univ.SetOf):
componentType = RecipientInfo()
class Attributes(univ.SetOf):
componentType = Attribute()
class ExtendedCertificateInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('certificate', Certificate()),
namedtype.NamedType('attributes', Attributes())
)
class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
pass
class Signature(univ.BitString):
pass
class ExtendedCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
namedtype.NamedType('signature', Signature())
)
class ExtendedCertificateOrCertificate(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', Certificate()),
namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class ExtendedCertificatesAndCertificates(univ.SetOf):
componentType = ExtendedCertificateOrCertificate()
class SerialNumber(univ.Integer):
pass
class CRLEntry(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('userCertificate', SerialNumber()),
namedtype.NamedType('revocationDate', useful.UTCTime())
)
class TBSCertificateRevocationList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('lastUpdate', useful.UTCTime()),
namedtype.NamedType('nextUpdate', useful.UTCTime()),
namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry()))
)
class CertificateRevocationList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificateRevocationList', TBSCertificateRevocationList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class CertificateRevocationLists(univ.SetOf):
componentType = CertificateRevocationList()
class DigestEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
pass
class EncryptedDigest(univ.OctetString):
pass
class SignerInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.OptionalNamedType('authenticatedAttributes', Attributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('digestEncryptionAlgorithm', DigestEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedDigest', EncryptedDigest()),
namedtype.OptionalNamedType('unauthenticatedAttributes', Attributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class SignerInfos(univ.SetOf):
componentType = SignerInfo()
class SignedAndEnvelopedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
class EnvelopedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
)
class DigestInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.NamedType('digest', Digest())
)
class SignedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('contentInfo', ContentInfo()),
namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
class Data(univ.OctetString):
pass
_contentTypeMapUpdate = {
data: Data(),
signedData: SignedData(),
envelopedData: EnvelopedData(),
signedAndEnvelopedData: SignedAndEnvelopedData(),
digestedData: DigestedData(),
encryptedData: EncryptedData()
}
contentTypeMap.update(_contentTypeMapUpdate)
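# Illustrative usage sketch, not part of the original module: a two-step
# PKCS#7 decode. The outer ContentInfo is parsed first, then the map above
# supplies the spec for the inner content identified by contentType.
def _example_decode_content_info(der_bytes):
    from pyasn1.codec.der import decoder
    content_info, rest = decoder.decode(der_bytes, asn1Spec=ContentInfo())
    assert not rest, 'unexpected trailing bytes after ContentInfo'
    content_type = content_info['contentType']
    inner, _ = decoder.decode(content_info['content'],
                              asn1Spec=contentTypeMap[content_type])
    return content_type, inner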
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc2315.py
| 0.593845 | 0.526465 |
rfc2315.py
|
pypi
|
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_modules import rfc1155
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('version-1', 0)
)
defaultValue = 0
class Community(univ.OctetString):
pass
class RequestID(univ.Integer):
pass
class ErrorStatus(univ.Integer):
namedValues = namedval.NamedValues(
('noError', 0),
('tooBig', 1),
('noSuchName', 2),
('badValue', 3),
('readOnly', 4),
('genErr', 5)
)
class ErrorIndex(univ.Integer):
pass
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1155.ObjectName()),
namedtype.NamedType('value', rfc1155.ObjectSyntax())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
class _RequestBase(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', RequestID()),
namedtype.NamedType('error-status', ErrorStatus()),
namedtype.NamedType('error-index', ErrorIndex()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
class GetNextRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class GetResponsePDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class TrapPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
namedtype.NamedType('generic-trap', univ.Integer().clone(
namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3),
('authenticationFailure', 4), ('egpNeighborLoss', 5),
('enterpriseSpecific', 6)))),
namedtype.NamedType('specific-trap', univ.Integer()),
namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class Pdus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-response', GetResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('trap', TrapPDU())
)
class Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('community', Community()),
namedtype.NamedType('data', Pdus())
)
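# Illustrative usage sketch, not part of the original module: assembling a
# minimal SNMPv1 GET for sysDescr.0 and BER-encoding it. The community
# string is a placeholder, and the item-assignment idiom that instantiates
# nested components on access assumes pyasn1 >= 0.4.
def _example_get_request(request_id=1):
    from pyasn1.codec.ber import encoder
    varbind = VarBind()
    varbind['name'] = rfc1155.ObjectName('1.3.6.1.2.1.1.1.0')  # sysDescr.0
    varbind['value']['simple']['empty'] = univ.Null('')
    message = Message()
    message['version'] = 'version-1'
    message['community'] = 'public'
    pdu = message['data']['get-request']
    pdu['request-id'] = request_id
    pdu['error-status'] = 'noError'
    pdu['error-index'] = 0
    pdu['variable-bindings'].append(varbind)
    return encoder.encode(message)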
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc1157.py
| 0.562537 | 0.266277 |
rfc1157.py
|
pypi
|
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc4211
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5652
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
class ChangeSubjectName(univ.Sequence):
pass
ChangeSubjectName.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('subject', rfc5280.Name()),
namedtype.OptionalNamedType('subjectAlt', rfc5280.GeneralNames())
)
class AttributeValue(univ.Any):
pass
class CMCStatus(univ.Integer):
pass
CMCStatus.namedValues = namedval.NamedValues(
('success', 0),
('failed', 2),
('pending', 3),
('noSupport', 4),
('confirmRequired', 5),
('popRequired', 6),
('partial', 7)
)
class PendInfo(univ.Sequence):
pass
PendInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('pendToken', univ.OctetString()),
namedtype.NamedType('pendTime', useful.GeneralizedTime())
)
bodyIdMax = univ.Integer(4294967295)
class BodyPartID(univ.Integer):
pass
BodyPartID.subtypeSpec = constraint.ValueRangeConstraint(0, bodyIdMax)
class BodyPartPath(univ.SequenceOf):
pass
BodyPartPath.componentType = BodyPartID()
BodyPartPath.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class BodyPartReference(univ.Choice):
pass
BodyPartReference.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('bodyPartPath', BodyPartPath())
)
class CMCFailInfo(univ.Integer):
pass
CMCFailInfo.namedValues = namedval.NamedValues(
('badAlg', 0),
('badMessageCheck', 1),
('badRequest', 2),
('badTime', 3),
('badCertId', 4),
('unsupportedExt', 5),
('mustArchiveKeys', 6),
('badIdentity', 7),
('popRequired', 8),
('popFailed', 9),
('noKeyReuse', 10),
('internalCAError', 11),
('tryLater', 12),
('authDataFail', 13)
)
class CMCStatusInfoV2(univ.Sequence):
pass
CMCStatusInfoV2.componentType = namedtype.NamedTypes(
namedtype.NamedType('cMCStatus', CMCStatus()),
namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())),
namedtype.OptionalNamedType('statusString', char.UTF8String()),
namedtype.OptionalNamedType(
'otherInfo', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('failInfo', CMCFailInfo()),
namedtype.NamedType('pendInfo', PendInfo()),
namedtype.NamedType(
'extendedFailInfo', univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('failInfoOID', univ.ObjectIdentifier()),
namedtype.NamedType('failInfoValue', AttributeValue()))
)
)
)
)
)
)
class GetCRL(univ.Sequence):
pass
GetCRL.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerName', rfc5280.Name()),
namedtype.OptionalNamedType('cRLName', rfc5280.GeneralName()),
namedtype.OptionalNamedType('time', useful.GeneralizedTime()),
namedtype.OptionalNamedType('reasons', rfc5280.ReasonFlags())
)
id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
id_cmc = _buildOid(id_pkix, 7)
id_cmc_batchResponses = _buildOid(id_cmc, 29)
id_cmc_popLinkWitness = _buildOid(id_cmc, 23)
class PopLinkWitnessV2(univ.Sequence):
pass
PopLinkWitnessV2.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyGenAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('macAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString())
)
id_cmc_popLinkWitnessV2 = _buildOid(id_cmc, 33)
id_cmc_identityProofV2 = _buildOid(id_cmc, 34)
id_cmc_revokeRequest = _buildOid(id_cmc, 17)
id_cmc_recipientNonce = _buildOid(id_cmc, 7)
class ControlsProcessed(univ.Sequence):
pass
ControlsProcessed.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference()))
)
class CertificationRequest(univ.Sequence):
pass
CertificationRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType(
'certificationRequestInfo', univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('subject', rfc5280.Name()),
namedtype.NamedType(
'subjectPublicKeyInfo', univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', univ.BitString())
)
)
),
namedtype.NamedType(
'attributes', univ.SetOf(
componentType=rfc5652.Attribute()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
)
)
)
),
namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
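# Illustrative usage sketch, not part of the original module: decoding a
# DER-encoded PKCS#10 request with the inline CertificationRequest spec
# above and returning the requested subject name.
def _example_decode_certification_request(der_bytes):
    from pyasn1.codec.der import decoder
    csr, rest = decoder.decode(der_bytes, asn1Spec=CertificationRequest())
    assert not rest, 'unexpected trailing bytes after request'
    return csr['certificationRequestInfo']['subject']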
class TaggedCertificationRequest(univ.Sequence):
pass
TaggedCertificationRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('certificationRequest', CertificationRequest())
)
class TaggedRequest(univ.Choice):
pass
TaggedRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('tcr', TaggedCertificationRequest().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('crm',
rfc4211.CertReqMsg().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('orm', univ.Sequence(componentType=namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('requestMessageType', univ.ObjectIdentifier()),
namedtype.NamedType('requestMessageValue', univ.Any())
))
.subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
id_cmc_popLinkRandom = _buildOid(id_cmc, 22)
id_cmc_statusInfo = _buildOid(id_cmc, 1)
id_cmc_trustedAnchors = _buildOid(id_cmc, 26)
id_cmc_transactionId = _buildOid(id_cmc, 5)
id_cmc_encryptedPOP = _buildOid(id_cmc, 9)
class PublishTrustAnchors(univ.Sequence):
pass
PublishTrustAnchors.componentType = namedtype.NamedTypes(
namedtype.NamedType('seqNumber', univ.Integer()),
namedtype.NamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('anchorHashes', univ.SequenceOf(componentType=univ.OctetString()))
)
class RevokeRequest(univ.Sequence):
pass
RevokeRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerName', rfc5280.Name()),
namedtype.NamedType('serialNumber', univ.Integer()),
namedtype.NamedType('reason', rfc5280.CRLReason()),
namedtype.OptionalNamedType('invalidityDate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('passphrase', univ.OctetString()),
namedtype.OptionalNamedType('comment', char.UTF8String())
)
id_cmc_senderNonce = _buildOid(id_cmc, 6)
id_cmc_authData = _buildOid(id_cmc, 27)
class TaggedContentInfo(univ.Sequence):
pass
TaggedContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('contentInfo', rfc5652.ContentInfo())
)
class IdentifyProofV2(univ.Sequence):
pass
IdentifyProofV2.componentType = namedtype.NamedTypes(
namedtype.NamedType('proofAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('macAlgId', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString())
)
class CMCPublicationInfo(univ.Sequence):
pass
CMCPublicationInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('certHashes', univ.SequenceOf(componentType=univ.OctetString())),
namedtype.NamedType('pubInfo', rfc4211.PKIPublicationInfo())
)
id_kp_cmcCA = _buildOid(rfc5280.id_kp, 27)
id_cmc_confirmCertAcceptance = _buildOid(id_cmc, 24)
id_cmc_raIdentityWitness = _buildOid(id_cmc, 35)
id_ExtensionReq = _buildOid(1, 2, 840, 113549, 1, 9, 14)
id_cct = _buildOid(id_pkix, 12)
id_cct_PKIData = _buildOid(id_cct, 2)
id_kp_cmcRA = _buildOid(rfc5280.id_kp, 28)
class CMCStatusInfo(univ.Sequence):
pass
CMCStatusInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('cMCStatus', CMCStatus()),
namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartID())),
namedtype.OptionalNamedType('statusString', char.UTF8String()),
namedtype.OptionalNamedType(
'otherInfo', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('failInfo', CMCFailInfo()),
namedtype.NamedType('pendInfo', PendInfo())
)
)
)
)
class DecryptedPOP(univ.Sequence):
pass
DecryptedPOP.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('thePOP', univ.OctetString())
)
id_cmc_addExtensions = _buildOid(id_cmc, 8)
id_cmc_modCertTemplate = _buildOid(id_cmc, 31)
class TaggedAttribute(univ.Sequence):
pass
TaggedAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('attrType', univ.ObjectIdentifier()),
namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()))
)
class OtherMsg(univ.Sequence):
pass
OtherMsg.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('otherMsgType', univ.ObjectIdentifier()),
namedtype.NamedType('otherMsgValue', univ.Any())
)
class PKIData(univ.Sequence):
pass
PKIData.componentType = namedtype.NamedTypes(
namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
namedtype.NamedType('reqSequence', univ.SequenceOf(componentType=TaggedRequest())),
namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
)
class BodyPartList(univ.SequenceOf):
pass
BodyPartList.componentType = BodyPartID()
BodyPartList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_cmc_responseBody = _buildOid(id_cmc, 37)
class AuthPublish(BodyPartID):
pass
class CMCUnsignedData(univ.Sequence):
pass
CMCUnsignedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartPath', BodyPartPath()),
namedtype.NamedType('identifier', univ.ObjectIdentifier()),
namedtype.NamedType('content', univ.Any())
)
class CMCCertId(rfc5652.IssuerAndSerialNumber):
pass
class PKIResponse(univ.Sequence):
pass
PKIResponse.componentType = namedtype.NamedTypes(
namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
)
class ResponseBody(PKIResponse):
pass
id_cmc_statusInfoV2 = _buildOid(id_cmc, 25)
id_cmc_lraPOPWitness = _buildOid(id_cmc, 11)
class ModCertTemplate(univ.Sequence):
pass
ModCertTemplate.componentType = namedtype.NamedTypes(
namedtype.NamedType('pkiDataReference', BodyPartPath()),
namedtype.NamedType('certReferences', BodyPartList()),
namedtype.DefaultedNamedType('replace', univ.Boolean().subtype(value=1)),
namedtype.NamedType('certTemplate', rfc4211.CertTemplate())
)
id_cmc_regInfo = _buildOid(id_cmc, 18)
id_cmc_identityProof = _buildOid(id_cmc, 3)
class ExtensionReq(univ.SequenceOf):
pass
ExtensionReq.componentType = rfc5280.Extension()
ExtensionReq.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_kp_cmcArchive = _buildOid(rfc5280.id_kp, 29)  # IANA registers id-kp-cmcArchive as { id-kp 29 }; { id-kp 28 } is id-kp-cmcRA
id_cmc_publishCert = _buildOid(id_cmc, 30)
id_cmc_dataReturn = _buildOid(id_cmc, 4)
class LraPopWitness(univ.Sequence):
pass
LraPopWitness.componentType = namedtype.NamedTypes(
namedtype.NamedType('pkiDataBodyid', BodyPartID()),
namedtype.NamedType('bodyIds', univ.SequenceOf(componentType=BodyPartID()))
)
id_aa = _buildOid(1, 2, 840, 113549, 1, 9, 16, 2)
id_aa_cmc_unsignedData = _buildOid(id_aa, 34)
id_cmc_getCert = _buildOid(id_cmc, 15)
id_cmc_batchRequests = _buildOid(id_cmc, 28)
id_cmc_decryptedPOP = _buildOid(id_cmc, 10)
id_cmc_responseInfo = _buildOid(id_cmc, 19)
id_cmc_changeSubjectName = _buildOid(id_cmc, 36)
class GetCert(univ.Sequence):
pass
GetCert.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerName', rfc5280.GeneralName()),
namedtype.NamedType('serialNumber', univ.Integer())
)
id_cmc_identification = _buildOid(id_cmc, 2)
id_cmc_queryPending = _buildOid(id_cmc, 21)
class AddExtensions(univ.Sequence):
pass
AddExtensions.componentType = namedtype.NamedTypes(
namedtype.NamedType('pkiDataReference', BodyPartID()),
namedtype.NamedType('certReferences', univ.SequenceOf(componentType=BodyPartID())),
namedtype.NamedType('extensions', univ.SequenceOf(componentType=rfc5280.Extension()))
)
class EncryptedPOP(univ.Sequence):
pass
EncryptedPOP.componentType = namedtype.NamedTypes(
namedtype.NamedType('request', TaggedRequest()),
namedtype.NamedType('cms', rfc5652.ContentInfo()),
namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witnessAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString())
)
id_cmc_getCRL = _buildOid(id_cmc, 16)
id_cct_PKIResponse = _buildOid(id_cct, 3)
id_cmc_controlProcessed = _buildOid(id_cmc, 32)
class NoSignatureValue(univ.OctetString):
pass
id_ad_cmc = _buildOid(rfc5280.id_ad, 12)
id_alg_noSignature = _buildOid(id_pkix, 6, 2)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc6402.py
| 0.506836 | 0.314314 |
rfc6402.py
|
pypi
|
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
class ObjectName(univ.ObjectIdentifier):
pass
class SimpleSyntax(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('number', univ.Integer()),
namedtype.NamedType('string', univ.OctetString()),
namedtype.NamedType('object', univ.ObjectIdentifier()),
namedtype.NamedType('empty', univ.Null())
)
class IpAddress(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0)
)
    subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
        4, 4
    )
class NetworkAddress(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('internet', IpAddress())
)
class Counter(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 1)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 4294967295
)
class Gauge(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 4294967295
)
class TimeTicks(univ.Integer):
tagSet = univ.Integer.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 3)
)
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
0, 4294967295
)
class Opaque(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 4)
)
class ApplicationSyntax(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('address', NetworkAddress()),
namedtype.NamedType('counter', Counter()),
namedtype.NamedType('gauge', Gauge()),
namedtype.NamedType('ticks', TimeTicks()),
namedtype.NamedType('arbitrary', Opaque())
)
class ObjectSyntax(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('simple', SimpleSyntax()),
namedtype.NamedType('application-wide', ApplicationSyntax())
)
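# Illustrative usage sketch, not part of the original module: wrapping a
# plain Python integer in the ObjectSyntax CHOICE, the shape an SNMP
# varbind value takes on the wire.
def _example_wrap_integer(value=42):
    syntax = ObjectSyntax()
    syntax['simple']['number'] = value
    return syntax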
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc1155.py
| 0.720172 | 0.405743 |
rfc1155.py
|
pypi
|
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc3280
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
class ObjectDigestInfo(univ.Sequence):
pass
ObjectDigestInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('digestedObjectType', univ.Enumerated(
namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))),
namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()),
namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('objectDigest', univ.BitString())
)
class IssuerSerial(univ.Sequence):
pass
IssuerSerial.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', rfc3280.GeneralNames()),
namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()),
namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier())
)
class TargetCert(univ.Sequence):
pass
TargetCert.componentType = namedtype.NamedTypes(
namedtype.NamedType('targetCertificate', IssuerSerial()),
namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()),
namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
)
class Target(univ.Choice):
pass
Target.componentType = namedtype.NamedTypes(
namedtype.NamedType('targetName', rfc3280.GeneralName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('targetCert',
TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
class Targets(univ.SequenceOf):
pass
Targets.componentType = Target()
class ProxyInfo(univ.SequenceOf):
pass
ProxyInfo.componentType = Targets()
id_at_role = _buildOid(rfc3280.id_at, 72)
id_pe_aaControls = _buildOid(rfc3280.id_pe, 6)
id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55)
id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4)
class ClassList(univ.BitString):
pass
ClassList.namedValues = namedval.NamedValues(
('unmarked', 0),
('unclassified', 1),
('restricted', 2),
('confidential', 3),
('secret', 4),
('topSecret', 5)
)
class SecurityCategory(univ.Sequence):
pass
SecurityCategory.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class Clearance(univ.Sequence):
pass
Clearance.componentType = namedtype.NamedTypes(
namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.DefaultedNamedType('classList',
ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 1)).subtype(
value="unclassified")),
namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class AttCertVersion(univ.Integer):
pass
AttCertVersion.namedValues = namedval.NamedValues(
('v2', 1)
)
id_aca = _buildOid(rfc3280.id_pkix, 10)
id_at_clearance = _buildOid(2, 5, 1, 5, 55)
class AttrSpec(univ.SequenceOf):
pass
AttrSpec.componentType = univ.ObjectIdentifier()
class AAControls(univ.Sequence):
pass
AAControls.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('pathLenConstraint',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
namedtype.OptionalNamedType('permittedAttrs',
AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('excludedAttrs',
AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1))
)
class AttCertValidityPeriod(univ.Sequence):
pass
AttCertValidityPeriod.componentType = namedtype.NamedTypes(
namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
)
id_aca_authenticationInfo = _buildOid(id_aca, 1)
class V2Form(univ.Sequence):
pass
V2Form.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()),
namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class AttCertIssuer(univ.Choice):
pass
AttCertIssuer.componentType = namedtype.NamedTypes(
namedtype.NamedType('v1Form', rfc3280.GeneralNames()),
namedtype.NamedType('v2Form',
V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class Holder(univ.Sequence):
pass
Holder.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
class AttributeCertificateInfo(univ.Sequence):
pass
AttributeCertificateInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', AttCertVersion()),
namedtype.NamedType('holder', Holder()),
namedtype.NamedType('issuer', AttCertIssuer()),
namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()),
namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
)
class AttributeCertificate(univ.Sequence):
pass
AttributeCertificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('acinfo', AttributeCertificateInfo()),
namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('signatureValue', univ.BitString())
)
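# Illustrative usage sketch, not part of the original module: decoding a
# DER-encoded attribute certificate with the spec above and returning the
# holder it was issued to.
def _example_decode_attribute_certificate(der_bytes):
    from pyasn1.codec.der import decoder
    ac, rest = decoder.decode(der_bytes, asn1Spec=AttributeCertificate())
    assert not rest, 'unexpected trailing bytes after certificate'
    return ac['acinfo']['holder']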
id_mod = _buildOid(rfc3280.id_pkix, 0)
id_mod_attribute_cert = _buildOid(id_mod, 12)
id_aca_accessIdentity = _buildOid(id_aca, 2)
class RoleSyntax(univ.Sequence):
pass
RoleSyntax.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('roleName',
rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_aca_chargingIdentity = _buildOid(id_aca, 3)
class ACClearAttrs(univ.Sequence):
pass
ACClearAttrs.componentType = namedtype.NamedTypes(
namedtype.NamedType('acIssuer', rfc3280.GeneralName()),
namedtype.NamedType('acSerial', univ.Integer()),
namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute()))
)
id_aca_group = _buildOid(id_aca, 4)
id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10)
class SvceAuthInfo(univ.Sequence):
pass
SvceAuthInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('service', rfc3280.GeneralName()),
namedtype.NamedType('ident', rfc3280.GeneralName()),
namedtype.OptionalNamedType('authInfo', univ.OctetString())
)
class IetfAttrSyntax(univ.Sequence):
pass
IetfAttrSyntax.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType(
'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType(
'values', univ.SequenceOf(
componentType=univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('octets', univ.OctetString()),
namedtype.NamedType('oid', univ.ObjectIdentifier()),
namedtype.NamedType('string', char.UTF8String())
)
)
)
)
)
id_aca_encAttrs = _buildOid(id_aca, 6)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc3281.py
| 0.672762 | 0.32029 |
rfc3281.py
|
pypi
|
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc2314
from pyasn1_modules import rfc2459
from pyasn1_modules import rfc2511
MAX = float('inf')
class KeyIdentifier(univ.OctetString):
pass
class CMPCertificate(rfc2459.Certificate):
pass
class OOBCert(CMPCertificate):
pass
class CertAnnContent(CMPCertificate):
pass
class PKIFreeText(univ.SequenceOf):
"""
PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
"""
componentType = char.UTF8String()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
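# Illustrative sketch only (not part of the RFC 4210 definitions above): a
# DER encode/decode round-trip for PKIFreeText. Only the standard pyasn1
# der codec is assumed.
def _example_pki_free_text():
    from pyasn1.codec.der import decoder, encoder
    free_text = PKIFreeText()
    free_text.setComponentByPosition(0, 'status pending')
    substrate = encoder.encode(free_text)
    decoded, rest = decoder.decode(substrate, asn1Spec=PKIFreeText())
    assert decoded[0] == 'status pending' and not rest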
class PollRepContent(univ.SequenceOf):
"""
PollRepContent ::= SEQUENCE OF SEQUENCE {
certReqId INTEGER,
checkAfter INTEGER, -- time in seconds
reason PKIFreeText OPTIONAL
}
"""
class CertReq(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('checkAfter', univ.Integer()),
namedtype.OptionalNamedType('reason', PKIFreeText())
)
componentType = CertReq()
class PollReqContent(univ.SequenceOf):
"""
PollReqContent ::= SEQUENCE OF SEQUENCE {
certReqId INTEGER
}
"""
class CertReq(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer())
)
componentType = CertReq()
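# Illustrative sketch only: building a PollReqContent that polls on a single
# request id, using the nested CertReq wrapper class defined above.
def _example_poll_req_content():
    poll = PollReqContent()
    item = PollReqContent.CertReq()
    item['certReqId'] = 7
    poll.append(item)
    return poll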
class InfoTypeAndValue(univ.Sequence):
"""
InfoTypeAndValue ::= SEQUENCE {
infoType OBJECT IDENTIFIER,
infoValue ANY DEFINED BY infoType OPTIONAL
}"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('infoType', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('infoValue', univ.Any())
)
class GenRepContent(univ.SequenceOf):
componentType = InfoTypeAndValue()
class GenMsgContent(univ.SequenceOf):
componentType = InfoTypeAndValue()
class PKIConfirmContent(univ.Null):
pass
class CRLAnnContent(univ.SequenceOf):
componentType = rfc2459.CertificateList()
class CAKeyUpdAnnContent(univ.Sequence):
"""
CAKeyUpdAnnContent ::= SEQUENCE {
oldWithNew CMPCertificate,
newWithOld CMPCertificate,
newWithNew CMPCertificate
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('oldWithNew', CMPCertificate()),
namedtype.NamedType('newWithOld', CMPCertificate()),
namedtype.NamedType('newWithNew', CMPCertificate())
)
class RevDetails(univ.Sequence):
"""
RevDetails ::= SEQUENCE {
certDetails CertTemplate,
crlEntryDetails Extensions OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
)
class RevReqContent(univ.SequenceOf):
componentType = RevDetails()
class CertOrEncCert(univ.Choice):
"""
CertOrEncCert ::= CHOICE {
certificate [0] CMPCertificate,
encryptedCert [1] EncryptedValue
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', CMPCertificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class CertifiedKeyPair(univ.Sequence):
"""
CertifiedKeyPair ::= SEQUENCE {
certOrEncCert CertOrEncCert,
privateKey [0] EncryptedValue OPTIONAL,
publicationInfo [1] PKIPublicationInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certOrEncCert', CertOrEncCert()),
namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class POPODecKeyRespContent(univ.SequenceOf):
componentType = univ.Integer()
class Challenge(univ.Sequence):
"""
Challenge ::= SEQUENCE {
owf AlgorithmIdentifier OPTIONAL,
witness OCTET STRING,
challenge OCTET STRING
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString()),
namedtype.NamedType('challenge', univ.OctetString())
)
class PKIStatus(univ.Integer):
"""
PKIStatus ::= INTEGER {
accepted (0),
grantedWithMods (1),
rejection (2),
waiting (3),
revocationWarning (4),
revocationNotification (5),
keyUpdateWarning (6)
}
"""
namedValues = namedval.NamedValues(
('accepted', 0),
('grantedWithMods', 1),
('rejection', 2),
('waiting', 3),
('revocationWarning', 4),
('revocationNotification', 5),
('keyUpdateWarning', 6)
)
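# Illustrative sketch only: PKIStatus values can be constructed from the
# named values above and compare equal to their integer codes.
def _example_pki_status():
    status = PKIStatus('grantedWithMods')
    assert int(status) == 1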
class PKIFailureInfo(univ.BitString):
"""
PKIFailureInfo ::= BIT STRING {
badAlg (0),
badMessageCheck (1),
badRequest (2),
badTime (3),
badCertId (4),
badDataFormat (5),
wrongAuthority (6),
incorrectData (7),
missingTimeStamp (8),
badPOP (9),
certRevoked (10),
certConfirmed (11),
wrongIntegrity (12),
badRecipientNonce (13),
timeNotAvailable (14),
unacceptedPolicy (15),
unacceptedExtension (16),
addInfoNotAvailable (17),
badSenderNonce (18),
badCertTemplate (19),
signerNotTrusted (20),
transactionIdInUse (21),
unsupportedVersion (22),
notAuthorized (23),
systemUnavail (24),
systemFailure (25),
duplicateCertReq (26)
"""
namedValues = namedval.NamedValues(
('badAlg', 0),
('badMessageCheck', 1),
('badRequest', 2),
('badTime', 3),
('badCertId', 4),
('badDataFormat', 5),
('wrongAuthority', 6),
('incorrectData', 7),
('missingTimeStamp', 8),
('badPOP', 9),
('certRevoked', 10),
('certConfirmed', 11),
('wrongIntegrity', 12),
('badRecipientNonce', 13),
('timeNotAvailable', 14),
('unacceptedPolicy', 15),
('unacceptedExtension', 16),
('addInfoNotAvailable', 17),
('badSenderNonce', 18),
('badCertTemplate', 19),
('signerNotTrusted', 20),
('transactionIdInUse', 21),
('unsupportedVersion', 22),
('notAuthorized', 23),
('systemUnavail', 24),
('systemFailure', 25),
('duplicateCertReq', 26)
)
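# Illustrative sketch only: pyasn1 BitString types with named bits accept a
# comma-separated list of bit names (behaviour assumed from the pyasn1 docs).
def _example_pki_failure_info():
    return PKIFailureInfo('badAlg,badTime')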
class PKIStatusInfo(univ.Sequence):
"""
PKIStatusInfo ::= SEQUENCE {
status PKIStatus,
statusString PKIFreeText OPTIONAL,
failInfo PKIFailureInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatus()),
namedtype.OptionalNamedType('statusString', PKIFreeText()),
namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
)
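# Illustrative sketch only: a DER round-trip of a PKIStatusInfo carrying
# just the mandatory 'status' field.
def _example_pki_status_info():
    from pyasn1.codec.der import decoder, encoder
    info = PKIStatusInfo()
    info['status'] = 'accepted'
    substrate = encoder.encode(info)
    decoded, rest = decoder.decode(substrate, asn1Spec=PKIStatusInfo())
    assert int(decoded['status']) == 0 and not rest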
class ErrorMsgContent(univ.Sequence):
"""
ErrorMsgContent ::= SEQUENCE {
pKIStatusInfo PKIStatusInfo,
errorCode INTEGER OPTIONAL,
-- implementation-specific error codes
errorDetails PKIFreeText OPTIONAL
-- implementation-specific error details
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
namedtype.OptionalNamedType('errorCode', univ.Integer()),
namedtype.OptionalNamedType('errorDetails', PKIFreeText())
)
class CertStatus(univ.Sequence):
"""
CertStatus ::= SEQUENCE {
certHash OCTET STRING,
certReqId INTEGER,
statusInfo PKIStatusInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certHash', univ.OctetString()),
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
)
class CertConfirmContent(univ.SequenceOf):
componentType = CertStatus()
class RevAnnContent(univ.Sequence):
"""
RevAnnContent ::= SEQUENCE {
status PKIStatus,
certId CertId,
willBeRevokedAt GeneralizedTime,
badSinceDate GeneralizedTime,
crlDetails Extensions OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatus()),
namedtype.NamedType('certId', rfc2511.CertId()),
namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
)
class RevRepContent(univ.Sequence):
"""
RevRepContent ::= SEQUENCE {
status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId
OPTIONAL,
crls [1] SEQUENCE SIZE (1..MAX) OF CertificateList
OPTIONAL
"""
componentType = namedtype.NamedTypes(
        # per the ASN.1 above, 'status' is a SEQUENCE SIZE (1..MAX) OF PKIStatusInfo
        namedtype.NamedType('status', univ.SequenceOf(componentType=PKIStatusInfo()).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.OptionalNamedType(
'revCerts', univ.SequenceOf(componentType=rfc2511.CertId()).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType(
'crls', univ.SequenceOf(componentType=rfc2459.CertificateList()).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class KeyRecRepContent(univ.Sequence):
"""
KeyRecRepContent ::= SEQUENCE {
status PKIStatusInfo,
newSigCert [0] CMPCertificate OPTIONAL,
caCerts [1] SEQUENCE SIZE (1..MAX) OF
CMPCertificate OPTIONAL,
keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
CertifiedKeyPair OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType(
'newSigCert', CMPCertificate().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType(
'caCerts', univ.SequenceOf(componentType=CMPCertificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
)
),
namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(componentType=CertifiedKeyPair()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))
)
)
class CertResponse(univ.Sequence):
"""
CertResponse ::= SEQUENCE {
certReqId INTEGER,
status PKIStatusInfo,
certifiedKeyPair CertifiedKeyPair OPTIONAL,
rspInfo OCTET STRING OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
namedtype.OptionalNamedType('rspInfo', univ.OctetString())
)
class CertRepMessage(univ.Sequence):
"""
CertRepMessage ::= SEQUENCE {
caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
OPTIONAL,
response SEQUENCE OF CertResponse
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType(
'caPubs', univ.SequenceOf(
componentType=CMPCertificate()
).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
),
namedtype.NamedType('response', univ.SequenceOf(componentType=CertResponse()))
)
class POPODecKeyChallContent(univ.SequenceOf):
componentType = Challenge()
class OOBCertHash(univ.Sequence):
"""
OOBCertHash ::= SEQUENCE {
hashAlg [0] AlgorithmIdentifier OPTIONAL,
certId [1] CertId OPTIONAL,
hashVal BIT STRING
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType(
'hashAlg', rfc2459.AlgorithmIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
),
namedtype.OptionalNamedType(
'certId', rfc2511.CertId().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
),
namedtype.NamedType('hashVal', univ.BitString())
)
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
class NestedMessageContent(univ.SequenceOf):
"""
NestedMessageContent ::= PKIMessages
"""
componentType = univ.Any()
class DHBMParameter(univ.Sequence):
"""
DHBMParameter ::= SEQUENCE {
owf AlgorithmIdentifier,
-- AlgId for a One-Way Function (SHA-1 recommended)
mac AlgorithmIdentifier
-- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
} -- or HMAC [RFC2104, RFC2202])
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
)
id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
class PBMParameter(univ.Sequence):
"""
PBMParameter ::= SEQUENCE {
salt OCTET STRING,
owf AlgorithmIdentifier,
iterationCount INTEGER,
mac AlgorithmIdentifier
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'salt', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 128))
),
namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('iterationCount', univ.Integer()),
namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
)
id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
class PKIProtection(univ.BitString):
pass
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
nestedMessageContent = NestedMessageContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20))
class PKIBody(univ.Choice):
"""
PKIBody ::= CHOICE { -- message-specific body elements
ir [0] CertReqMessages, --Initialization Request
ip [1] CertRepMessage, --Initialization Response
cr [2] CertReqMessages, --Certification Request
cp [3] CertRepMessage, --Certification Response
p10cr [4] CertificationRequest, --imported from [PKCS10]
popdecc [5] POPODecKeyChallContent, --pop Challenge
popdecr [6] POPODecKeyRespContent, --pop Response
kur [7] CertReqMessages, --Key Update Request
kup [8] CertRepMessage, --Key Update Response
krr [9] CertReqMessages, --Key Recovery Request
krp [10] KeyRecRepContent, --Key Recovery Response
rr [11] RevReqContent, --Revocation Request
rp [12] RevRepContent, --Revocation Response
ccr [13] CertReqMessages, --Cross-Cert. Request
ccp [14] CertRepMessage, --Cross-Cert. Response
ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann.
cann [16] CertAnnContent, --Certificate Ann.
rann [17] RevAnnContent, --Revocation Ann.
crlann [18] CRLAnnContent, --CRL Announcement
pkiconf [19] PKIConfirmContent, --Confirmation
nested [20] NestedMessageContent, --Nested Message
genm [21] GenMsgContent, --General Message
genp [22] GenRepContent, --General Response
error [23] ErrorMsgContent, --Error Message
certConf [24] CertConfirmContent, --Certificate confirm
pollReq [25] PollReqContent, --Polling request
pollRep [26] PollRepContent --Polling response
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'ir', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.NamedType(
'ip', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
),
namedtype.NamedType(
'cr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
),
namedtype.NamedType(
'cp', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
),
namedtype.NamedType(
'p10cr', rfc2314.CertificationRequest().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
)
),
namedtype.NamedType(
'popdecc', POPODecKeyChallContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
)
),
namedtype.NamedType(
'popdecr', POPODecKeyRespContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
)
),
namedtype.NamedType(
'kur', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
)
),
namedtype.NamedType(
'kup', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
)
),
namedtype.NamedType(
'krr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)
)
),
namedtype.NamedType(
'krp', KeyRecRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10)
)
),
namedtype.NamedType(
'rr', RevReqContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11)
)
),
namedtype.NamedType(
'rp', RevRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12)
)
),
namedtype.NamedType(
'ccr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13)
)
),
namedtype.NamedType(
'ccp', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14)
)
),
namedtype.NamedType(
'ckuann', CAKeyUpdAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 15)
)
),
namedtype.NamedType(
'cann', CertAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
)
),
namedtype.NamedType(
'rann', RevAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
)
),
namedtype.NamedType(
'crlann', CRLAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
)
),
namedtype.NamedType(
'pkiconf', PKIConfirmContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
)
),
namedtype.NamedType(
'nested', nestedMessageContent
),
# namedtype.NamedType('nested', NestedMessageContent().subtype(
# explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
# )
# ),
namedtype.NamedType(
'genm', GenMsgContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
)
),
namedtype.NamedType(
            'genp', GenRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 22)
)
),
namedtype.NamedType(
'error', ErrorMsgContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 23)
)
),
namedtype.NamedType(
'certConf', CertConfirmContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 24)
)
),
namedtype.NamedType(
'pollReq', PollReqContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 25)
)
),
namedtype.NamedType(
'pollRep', PollRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 26)
)
)
)
class PKIHeader(univ.Sequence):
"""
PKIHeader ::= SEQUENCE {
pvno INTEGER { cmp1999(1), cmp2000(2) },
sender GeneralName,
recipient GeneralName,
messageTime [0] GeneralizedTime OPTIONAL,
protectionAlg [1] AlgorithmIdentifier OPTIONAL,
senderKID [2] KeyIdentifier OPTIONAL,
recipKID [3] KeyIdentifier OPTIONAL,
transactionID [4] OCTET STRING OPTIONAL,
senderNonce [5] OCTET STRING OPTIONAL,
recipNonce [6] OCTET STRING OPTIONAL,
freeText [7] PKIFreeText OPTIONAL,
generalInfo [8] SEQUENCE SIZE (1..MAX) OF
InfoTypeAndValue OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'pvno', univ.Integer(
namedValues=namedval.NamedValues(('cmp1999', 1), ('cmp2000', 2))
)
),
namedtype.NamedType('sender', rfc2459.GeneralName()),
namedtype.NamedType('recipient', rfc2459.GeneralName()),
namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
        # the [8] tag and the SIZE constraint belong on the SEQUENCE OF itself,
        # not on its InfoTypeAndValue component (see the ASN.1 above)
        namedtype.OptionalNamedType('generalInfo',
                                    univ.SequenceOf(componentType=InfoTypeAndValue()).subtype(
                                        subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                                        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)))
)
class ProtectedPart(univ.Sequence):
"""
ProtectedPart ::= SEQUENCE {
header PKIHeader,
body PKIBody
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PKIHeader()),
        namedtype.NamedType('body', PKIBody())
)
class PKIMessage(univ.Sequence):
"""
PKIMessage ::= SEQUENCE {
header PKIHeader,
body PKIBody,
protection [0] PKIProtection OPTIONAL,
extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
OPTIONAL
}"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PKIHeader()),
namedtype.NamedType('body', PKIBody()),
namedtype.OptionalNamedType('protection', PKIProtection().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('extraCerts',
univ.SequenceOf(
componentType=CMPCertificate()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class PKIMessages(univ.SequenceOf):
"""
PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
"""
componentType = PKIMessage()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
NestedMessageContent._componentType = PKIMessages()
nestedMessageContent._componentType = PKIMessages()
# ---- end of pyasn1_modules/rfc4210.py (pypi) ----
# ---- begin pyasn1_modules/rfc1905.py (pypi) ----
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_modules import rfc1902
max_bindings = rfc1902.Integer(2147483647)
class _BindValue(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('value', rfc1902.ObjectSyntax()),
namedtype.NamedType('unSpecified', univ.Null()),
namedtype.NamedType('noSuchObject',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('noSuchInstance',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('endOfMibView',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1902.ObjectName()),
        # note: the value CHOICE component is registered under the empty name ''
        namedtype.NamedType('', _BindValue())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(
0, max_bindings
)
class PDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', rfc1902.Integer32()),
namedtype.NamedType('error-status', univ.Integer(
namedValues=namedval.NamedValues(('noError', 0), ('tooBig', 1), ('noSuchName', 2), ('badValue', 3),
('readOnly', 4), ('genErr', 5), ('noAccess', 6), ('wrongType', 7),
('wrongLength', 8), ('wrongEncoding', 9), ('wrongValue', 10),
('noCreation', 11), ('inconsistentValue', 12), ('resourceUnavailable', 13),
('commitFailed', 14), ('undoFailed', 15), ('authorizationError', 16),
('notWritable', 17), ('inconsistentName', 18)))),
namedtype.NamedType('error-index',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
namedtype.NamedType('variable-bindings', VarBindList())
)
class BulkPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', rfc1902.Integer32()),
namedtype.NamedType('non-repeaters',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
namedtype.NamedType('max-repetitions',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
class GetNextRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class ResponsePDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class GetBulkRequestPDU(BulkPDU):
    tagSet = BulkPDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
)
class InformRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
)
class SNMPv2TrapPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
)
class ReportPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
)
class PDUs(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-bulk-request', GetBulkRequestPDU()),
namedtype.NamedType('response', ResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('inform-request', InformRequestPDU()),
namedtype.NamedType('snmpV2-trap', SNMPv2TrapPDU()),
namedtype.NamedType('report', ReportPDU())
)
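# Illustrative sketch only: assembling a GetRequest for one OID with an
# unSpecified value. The OID below (sysDescr.0) is just an example; note
# that the value CHOICE inside VarBind is registered under the empty name.
def _example_get_request():
    pdu = GetRequestPDU()
    pdu['request-id'] = 1
    pdu['error-status'] = 'noError'
    pdu['error-index'] = 0
    value = _BindValue()
    value['unSpecified'] = ''
    var_bind = VarBind()
    var_bind['name'] = '1.3.6.1.2.1.1.1.0'
    var_bind.setComponentByName('', value)
    pdu['variable-bindings'].append(var_bind)
    return pdu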
# ---- end of pyasn1_modules/rfc1905.py (pypi) ----
# ---- begin pyasn1_modules/rfc4211.py (pypi) ----
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_modules import rfc3280
from pyasn1_modules import rfc3852
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
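# Illustrative sketch only: _buildOid accepts a mix of existing OIDs and
# plain integers.
def _example_build_oid():
    base = univ.ObjectIdentifier('1.3.6.1.5.5.7')
    assert _buildOid(base, 5, 1) == univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1')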
id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
id_pkip = _buildOid(id_pkix, 5)
id_regCtrl = _buildOid(id_pkip, 1)
class SinglePubInfo(univ.Sequence):
pass
SinglePubInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('pubMethod', univ.Integer(
namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
namedtype.OptionalNamedType('pubLocation', rfc3280.GeneralName())
)
class UTF8Pairs(char.UTF8String):
pass
class PKMACValue(univ.Sequence):
pass
PKMACValue.componentType = namedtype.NamedTypes(
namedtype.NamedType('algId', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('value', univ.BitString())
)
class POPOSigningKeyInput(univ.Sequence):
pass
POPOSigningKeyInput.componentType = namedtype.NamedTypes(
namedtype.NamedType(
'authInfo', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType(
'sender', rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
),
namedtype.NamedType(
'publicKeyMAC', PKMACValue()
)
)
)
),
namedtype.NamedType('publicKey', rfc3280.SubjectPublicKeyInfo())
)
class POPOSigningKey(univ.Sequence):
pass
POPOSigningKey.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('algorithmIdentifier', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class Attributes(univ.SetOf):
pass
Attributes.componentType = rfc3280.Attribute()
class PrivateKeyInfo(univ.Sequence):
pass
PrivateKeyInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('privateKeyAlgorithm', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('privateKey', univ.OctetString()),
namedtype.OptionalNamedType('attributes',
Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class EncryptedValue(univ.Sequence):
pass
EncryptedValue.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('intendedAlg', rfc3280.AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('symmAlg', rfc3280.AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('keyAlg', rfc3280.AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.NamedType('encValue', univ.BitString())
)
class EncryptedKey(univ.Choice):
pass
EncryptedKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('encryptedValue', EncryptedValue()),
namedtype.NamedType('envelopedData', rfc3852.EnvelopedData().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class KeyGenParameters(univ.OctetString):
pass
class PKIArchiveOptions(univ.Choice):
pass
PKIArchiveOptions.componentType = namedtype.NamedTypes(
namedtype.NamedType('encryptedPrivKey',
EncryptedKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('keyGenParameters',
KeyGenParameters().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('archiveRemGenPrivKey',
univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
id_regCtrl_authenticator = _buildOid(id_regCtrl, 2)
id_regInfo = _buildOid(id_pkip, 2)
id_regInfo_certReq = _buildOid(id_regInfo, 2)
class ProtocolEncrKey(rfc3280.SubjectPublicKeyInfo):
pass
class Authenticator(char.UTF8String):
pass
class SubsequentMessage(univ.Integer):
pass
SubsequentMessage.namedValues = namedval.NamedValues(
('encrCert', 0),
('challengeResp', 1)
)
class AttributeTypeAndValue(univ.Sequence):
pass
AttributeTypeAndValue.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', univ.ObjectIdentifier()),
namedtype.NamedType('value', univ.Any())
)
class POPOPrivKey(univ.Choice):
pass
POPOPrivKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('thisMessage',
univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('subsequentMessage',
SubsequentMessage().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('dhMAC',
univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('agreeMAC',
PKMACValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.NamedType('encryptedKey', rfc3852.EnvelopedData().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
)
class ProofOfPossession(univ.Choice):
pass
ProofOfPossession.componentType = namedtype.NamedTypes(
namedtype.NamedType('raVerified',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('signature', POPOSigningKey().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('keyEncipherment',
POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.NamedType('keyAgreement',
POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
)
class OptionalValidity(univ.Sequence):
pass
OptionalValidity.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('notBefore', rfc3280.Time().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('notAfter', rfc3280.Time().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class CertTemplate(univ.Sequence):
pass
CertTemplate.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('version', rfc3280.Version().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('signingAlg', rfc3280.AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('issuer', rfc3280.Name().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.OptionalNamedType('subject', rfc3280.Name().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.OptionalNamedType('publicKey', rfc3280.SubjectPublicKeyInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.OptionalNamedType('subjectUID', rfc3280.UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
namedtype.OptionalNamedType('extensions', rfc3280.Extensions().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9)))
)
class Controls(univ.SequenceOf):
pass
Controls.componentType = AttributeTypeAndValue()
Controls.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class CertRequest(univ.Sequence):
pass
CertRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('certTemplate', CertTemplate()),
namedtype.OptionalNamedType('controls', Controls())
)
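# Illustrative sketch only: the smallest well-formed CertRequest is a
# request id plus an empty CertTemplate (every template field is OPTIONAL).
def _example_cert_request():
    from pyasn1.codec.der import encoder
    request = CertRequest()
    request['certReqId'] = 0
    request['certTemplate'] = CertTemplate()
    return encoder.encode(request)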
class CertReqMsg(univ.Sequence):
pass
CertReqMsg.componentType = namedtype.NamedTypes(
namedtype.NamedType('certReq', CertRequest()),
namedtype.OptionalNamedType('popo', ProofOfPossession()),
namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()))
)
class CertReqMessages(univ.SequenceOf):
pass
CertReqMessages.componentType = CertReqMsg()
CertReqMessages.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class CertReq(CertRequest):
pass
id_regCtrl_pkiPublicationInfo = _buildOid(id_regCtrl, 3)
class CertId(univ.Sequence):
pass
CertId.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', rfc3280.GeneralName()),
namedtype.NamedType('serialNumber', univ.Integer())
)
class OldCertId(CertId):
pass
class PKIPublicationInfo(univ.Sequence):
pass
PKIPublicationInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('action',
univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()))
)
class EncKeyWithID(univ.Sequence):
pass
EncKeyWithID.componentType = namedtype.NamedTypes(
namedtype.NamedType('privateKey', PrivateKeyInfo()),
namedtype.OptionalNamedType(
'identifier', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('string', char.UTF8String()),
namedtype.NamedType('generalName', rfc3280.GeneralName())
)
)
)
)
id_regCtrl_protocolEncrKey = _buildOid(id_regCtrl, 6)
id_regCtrl_oldCertID = _buildOid(id_regCtrl, 5)
id_smime = _buildOid(1, 2, 840, 113549, 1, 9, 16)
class PBMParameter(univ.Sequence):
pass
PBMParameter.componentType = namedtype.NamedTypes(
namedtype.NamedType('salt', univ.OctetString()),
namedtype.NamedType('owf', rfc3280.AlgorithmIdentifier()),
namedtype.NamedType('iterationCount', univ.Integer()),
namedtype.NamedType('mac', rfc3280.AlgorithmIdentifier())
)
id_regCtrl_regToken = _buildOid(id_regCtrl, 1)
id_regCtrl_pkiArchiveOptions = _buildOid(id_regCtrl, 4)
id_regInfo_utf8Pairs = _buildOid(id_regInfo, 1)
id_ct = _buildOid(id_smime, 1)
id_ct_encKeyWithID = _buildOid(id_ct, 21)
class RegToken(char.UTF8String):
pass
# ---- end of pyasn1_modules/rfc4211.py (pypi) ----
# ---- begin pyasn1_modules/rfc2511.py (pypi) ----
from pyasn1_modules import rfc2315
from pyasn1_modules.rfc2459 import *
MAX = float('inf')
id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
id_pkip = univ.ObjectIdentifier('1.3.6.1.5.5.7.5')
id_regCtrl = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1')
id_regCtrl_regToken = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.1')
id_regCtrl_authenticator = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.2')
id_regCtrl_pkiPublicationInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.3')
id_regCtrl_pkiArchiveOptions = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.4')
id_regCtrl_oldCertID = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.5')
id_regCtrl_protocolEncrKey = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.6')
id_regInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2')
id_regInfo_utf8Pairs = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.1')
id_regInfo_certReq = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.2')
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString):
pass
# end of PKIX Certificate Extensions module
class UTF8Pairs(char.UTF8String):
pass
class ProtocolEncrKey(SubjectPublicKeyInfo):
pass
class CertId(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', GeneralName()),
namedtype.NamedType('serialNumber', univ.Integer())
)
class OldCertId(CertId):
pass
class KeyGenParameters(univ.OctetString):
pass
class EncryptedValue(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('intendedAlg', AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('symmAlg', AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.OptionalNamedType('keyAlg', AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.NamedType('encValue', univ.BitString())
)
class EncryptedKey(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('encryptedValue', EncryptedValue()),
namedtype.NamedType('envelopedData', rfc2315.EnvelopedData().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class PKIArchiveOptions(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('encryptedPrivKey', EncryptedKey().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('keyGenParameters', KeyGenParameters().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('archiveRemGenPrivKey',
univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class SinglePubInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('pubMethod', univ.Integer(
namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
namedtype.OptionalNamedType('pubLocation', GeneralName())
)
class PKIPublicationInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('action',
univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
class Authenticator(char.UTF8String):
pass
class RegToken(char.UTF8String):
pass
class SubsequentMessage(univ.Integer):
namedValues = namedval.NamedValues(
('encrCert', 0),
('challengeResp', 1)
)
class POPOPrivKey(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('thisMessage',
univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('subsequentMessage', SubsequentMessage().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('dhMAC',
univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class PBMParameter(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('salt', univ.OctetString()),
namedtype.NamedType('owf', AlgorithmIdentifier()),
namedtype.NamedType('iterationCount', univ.Integer()),
namedtype.NamedType('mac', AlgorithmIdentifier())
)
class PKMACValue(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algId', AlgorithmIdentifier()),
namedtype.NamedType('value', univ.BitString())
)
class POPOSigningKeyInput(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'authInfo', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType(
'sender', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('publicKeyMAC', PKMACValue())
)
)
),
namedtype.NamedType('publicKey', SubjectPublicKeyInfo())
)
class POPOSigningKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('algorithmIdentifier', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class ProofOfPossession(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('raVerified',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('signature', POPOSigningKey().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('keyEncipherment', POPOPrivKey().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.NamedType('keyAgreement', POPOPrivKey().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
)
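# Illustrative sketch only: the simplest proof-of-possession is the
# RA-verified alternative, whose value is an ASN.1 NULL (spelled '' in
# pyasn1).
def _example_ra_verified_pop():
    pop = ProofOfPossession()
    pop['raVerified'] = ''
    return pop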
class Controls(univ.SequenceOf):
componentType = AttributeTypeAndValue()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class OptionalValidity(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('notBefore',
Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('notAfter',
Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class CertTemplate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('version', Version().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('signingAlg', AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.OptionalNamedType('issuer', Name().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
namedtype.OptionalNamedType('subject', Name().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
namedtype.OptionalNamedType('publicKey', SubjectPublicKeyInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
namedtype.OptionalNamedType('issuerUID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.OptionalNamedType('subjectUID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
namedtype.OptionalNamedType('extensions', Extensions().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
)
class CertRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('certTemplate', CertTemplate()),
namedtype.OptionalNamedType('controls', Controls())
)
class CertReq(CertRequest):
pass
class CertReqMsg(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReq', CertRequest()),
namedtype.OptionalNamedType('pop', ProofOfPossession()),
namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
class CertReqMessages(univ.SequenceOf):
componentType = CertReqMsg()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# ---- end of pyasn1_modules/rfc2511.py (pypi) ----
# ---- begin next pypi corpus entry (CMS definitions; filename not shown in this excerpt) ----
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc3281
from pyasn1_modules import rfc5280
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
class AttCertVersionV1(univ.Integer):
pass
AttCertVersionV1.namedValues = namedval.NamedValues(
('v1', 0)
)
class AttributeCertificateInfoV1(univ.Sequence):
pass
AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
namedtype.NamedType(
'subject', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('subjectName', rfc5280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
)
),
namedtype.NamedType('issuer', rfc5280.GeneralNames()),
namedtype.NamedType('signature', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber()),
namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc5280.Attribute())),
namedtype.OptionalNamedType('issuerUniqueID', rfc5280.UniqueIdentifier()),
namedtype.OptionalNamedType('extensions', rfc5280.Extensions())
)
class AttributeCertificateV1(univ.Sequence):
pass
AttributeCertificateV1.componentType = namedtype.NamedTypes(
namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class AttributeValue(univ.Any):
pass
class Attribute(univ.Sequence):
pass
Attribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('attrType', univ.ObjectIdentifier()),
namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()))
)
class SignedAttributes(univ.SetOf):
pass
SignedAttributes.componentType = Attribute()
SignedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class AttributeCertificateV2(rfc3281.AttributeCertificate):
pass
class OtherKeyAttribute(univ.Sequence):
pass
OtherKeyAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('keyAttr', univ.Any())
)
class UnauthAttributes(univ.SetOf):
pass
UnauthAttributes.componentType = Attribute()
UnauthAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
class SignatureValue(univ.OctetString):
pass
class IssuerAndSerialNumber(univ.Sequence):
pass
IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', rfc5280.Name()),
namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber())
)
class SubjectKeyIdentifier(univ.OctetString):
pass
class RecipientKeyIdentifier(univ.Sequence):
pass
RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
namedtype.OptionalNamedType('other', OtherKeyAttribute())
)
class KeyAgreeRecipientIdentifier(univ.Choice):
pass
KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class EncryptedKey(univ.OctetString):
pass
class RecipientEncryptedKey(univ.Sequence):
pass
RecipientEncryptedKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class RecipientEncryptedKeys(univ.SequenceOf):
pass
RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
class MessageAuthenticationCode(univ.OctetString):
pass
class CMSVersion(univ.Integer):
pass
CMSVersion.namedValues = namedval.NamedValues(
('v0', 0),
('v1', 1),
('v2', 2),
('v3', 3),
('v4', 4),
('v5', 5)
)
class OtherCertificateFormat(univ.Sequence):
pass
OtherCertificateFormat.componentType = namedtype.NamedTypes(
namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
namedtype.NamedType('otherCert', univ.Any())
)
class ExtendedCertificateInfo(univ.Sequence):
pass
ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('certificate', rfc5280.Certificate()),
namedtype.NamedType('attributes', UnauthAttributes())
)
class Signature(univ.BitString):
pass
class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
pass
class ExtendedCertificate(univ.Sequence):
pass
ExtendedCertificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
namedtype.NamedType('signature', Signature())
)
class CertificateChoices(univ.Choice):
pass
CertificateChoices.componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', rfc5280.Certificate()),
namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('other', OtherCertificateFormat().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
)
class CertificateSet(univ.SetOf):
pass
CertificateSet.componentType = CertificateChoices()
class OtherRevocationInfoFormat(univ.Sequence):
pass
OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
namedtype.NamedType('otherRevInfo', univ.Any())
)
class RevocationInfoChoice(univ.Choice):
pass
RevocationInfoChoice.componentType = namedtype.NamedTypes(
namedtype.NamedType('crl', rfc5280.CertificateList()),
namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class RevocationInfoChoices(univ.SetOf):
pass
RevocationInfoChoices.componentType = RevocationInfoChoice()
class OriginatorInfo(univ.Sequence):
pass
OriginatorInfo.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('certs', CertificateSet().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ContentType(univ.ObjectIdentifier):
pass
class EncryptedContent(univ.OctetString):
pass
class ContentEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
pass
class EncryptedContentInfo(univ.Sequence):
pass
EncryptedContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class UnprotectedAttributes(univ.SetOf):
pass
UnprotectedAttributes.componentType = Attribute()
UnprotectedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
pass
class KEKIdentifier(univ.Sequence):
pass
KEKIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyIdentifier', univ.OctetString()),
namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
namedtype.OptionalNamedType('other', OtherKeyAttribute())
)
class KEKRecipientInfo(univ.Sequence):
pass
KEKRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('kekid', KEKIdentifier()),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class KeyDerivationAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
pass
class PasswordRecipientInfo(univ.Sequence):
pass
PasswordRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class RecipientIdentifier(univ.Choice):
pass
RecipientIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class KeyTransRecipientInfo(univ.Sequence):
pass
KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('rid', RecipientIdentifier()),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class UserKeyingMaterial(univ.OctetString):
pass
class OriginatorPublicKey(univ.Sequence):
pass
OriginatorPublicKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('publicKey', univ.BitString())
)
class OriginatorIdentifierOrKey(univ.Choice):
pass
OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class KeyAgreeRecipientInfo(univ.Sequence):
pass
KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
)
class OtherRecipientInfo(univ.Sequence):
pass
OtherRecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('oriType', univ.ObjectIdentifier()),
namedtype.NamedType('oriValue', univ.Any())
)
class RecipientInfo(univ.Choice):
pass
RecipientInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('ktri', KeyTransRecipientInfo()),
namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
namedtype.NamedType('ori', OtherRecipientInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
)
class RecipientInfos(univ.SetOf):
pass
RecipientInfos.componentType = RecipientInfo()
RecipientInfos.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class EnvelopedData(univ.Sequence):
pass
EnvelopedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class DigestAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
pass
id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
class EncryptedData(univ.Sequence):
pass
EncryptedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
class MessageAuthenticationCodeAlgorithm(rfc5280.AlgorithmIdentifier):
pass
class UnsignedAttributes(univ.SetOf):
pass
UnsignedAttributes.componentType = Attribute()
UnsignedAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class SignerIdentifier(univ.Choice):
pass
SignerIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class SignerInfo(univ.Sequence):
pass
SignerInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('sid', SignerIdentifier()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
namedtype.NamedType('signature', SignatureValue()),
namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class SignerInfos(univ.SetOf):
pass
SignerInfos.componentType = SignerInfo()
class Countersignature(SignerInfo):
pass
class ContentInfo(univ.Sequence):
pass
ContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class EncapsulatedContentInfo(univ.Sequence):
pass
EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('eContentType', ContentType()),
namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
class MessageDigest(univ.OctetString):
pass
class AuthAttributes(univ.SetOf):
pass
AuthAttributes.componentType = Attribute()
AuthAttributes.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class Time(univ.Choice):
pass
Time.componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
class AuthenticatedData(univ.Sequence):
pass
AuthenticatedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('mac', MessageAuthenticationCode()),
namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
class ExtendedCertificateOrCertificate(univ.Choice):
pass
ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', rfc5280.Certificate()),
namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class Digest(univ.OctetString):
pass
class DigestedData(univ.Sequence):
pass
DigestedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
namedtype.NamedType('digest', Digest())
)
id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
class DigestAlgorithmIdentifiers(univ.SetOf):
pass
DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
class SignedData(univ.Sequence):
pass
SignedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', CMSVersion()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
class SigningTime(Time):
pass
id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
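# Hedged usage sketch, not part of the original RFC 5652 module: decode a
# DER-encoded CMS ContentInfo and, when it carries SignedData, decode the
# inner structure as well. `der_bytes` is a hypothetical, valid DER input.
def _example_decode_content_info(der_bytes):
    from pyasn1.codec.der import decoder
    content_info, _ = decoder.decode(der_bytes, asn1Spec=ContentInfo())
    if content_info['contentType'] == id_signedData:
        # 'content' is explicitly tagged, so the Any holds the inner encoding.
        inner = content_info['content'].asOctets()
        signed_data, _ = decoder.decode(inner, asn1Spec=SignedData())
        return signed_data
    return content_info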
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc5652.py
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import univ
def _OID(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
md2 = _OID(1, 2, 840, 113549, 2, 2)
md5 = _OID(1, 2, 840, 113549, 2, 5)
id_sha1 = _OID(1, 3, 14, 3, 2, 26)
id_dsa = _OID(1, 2, 840, 10040, 4, 1)
class DSAPublicKey(univ.Integer):
pass
class Dss_Parms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('g', univ.Integer())
)
id_dsa_with_sha1 = _OID(1, 2, 840, 10040, 4, 3)
class Dss_Sig_Value(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('r', univ.Integer()),
namedtype.NamedType('s', univ.Integer())
)
pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
rsaEncryption = _OID(pkcs_1, 1)
md2WithRSAEncryption = _OID(pkcs_1, 2)
md5WithRSAEncryption = _OID(pkcs_1, 4)
sha1WithRSAEncryption = _OID(pkcs_1, 5)
class RSAPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer())
)
dhpublicnumber = _OID(1, 2, 840, 10046, 2, 1)
class DHPublicKey(univ.Integer):
pass
class ValidationParms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('seed', univ.BitString()),
namedtype.NamedType('pgenCounter', univ.Integer())
)
class DomainParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('g', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.OptionalNamedType('j', univ.Integer()),
namedtype.OptionalNamedType('validationParms', ValidationParms())
)
id_keyExchangeAlgorithm = _OID(2, 16, 840, 1, 101, 2, 1, 1, 22)
class KEA_Parms_Id(univ.OctetString):
pass
ansi_X9_62 = _OID(1, 2, 840, 10045)
class FieldID(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('fieldType', univ.ObjectIdentifier()),
namedtype.NamedType('parameters', univ.Any())
)
id_ecSigType = _OID(ansi_X9_62, 4)
ecdsa_with_SHA1 = _OID(id_ecSigType, 1)
class ECDSA_Sig_Value(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('r', univ.Integer()),
namedtype.NamedType('s', univ.Integer())
)
id_fieldType = _OID(ansi_X9_62, 1)
prime_field = _OID(id_fieldType, 1)
class Prime_p(univ.Integer):
pass
characteristic_two_field = _OID(id_fieldType, 2)
class Characteristic_two(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('m', univ.Integer()),
namedtype.NamedType('basis', univ.ObjectIdentifier()),
namedtype.NamedType('parameters', univ.Any())
)
id_characteristic_two_basis = _OID(characteristic_two_field, 3)
gnBasis = _OID(id_characteristic_two_basis, 1)
tpBasis = _OID(id_characteristic_two_basis, 2)
class Trinomial(univ.Integer):
pass
ppBasis = _OID(id_characteristic_two_basis, 3)
class Pentanomial(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('k1', univ.Integer()),
namedtype.NamedType('k2', univ.Integer()),
namedtype.NamedType('k3', univ.Integer())
)
class FieldElement(univ.OctetString):
pass
class ECPoint(univ.OctetString):
pass
class Curve(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('a', FieldElement()),
namedtype.NamedType('b', FieldElement()),
namedtype.OptionalNamedType('seed', univ.BitString())
)
class ECPVer(univ.Integer):
namedValues = namedval.NamedValues(
('ecpVer1', 1)
)
class ECParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', ECPVer()),
namedtype.NamedType('fieldID', FieldID()),
namedtype.NamedType('curve', Curve()),
namedtype.NamedType('base', ECPoint()),
namedtype.NamedType('order', univ.Integer()),
namedtype.OptionalNamedType('cofactor', univ.Integer())
)
class EcpkParameters(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('ecParameters', ECParameters()),
namedtype.NamedType('namedCurve', univ.ObjectIdentifier()),
namedtype.NamedType('implicitlyCA', univ.Null())
)
id_publicKeyType = _OID(ansi_X9_62, 2)
id_ecPublicKey = _OID(id_publicKeyType, 1)
ellipticCurve = _OID(ansi_X9_62, 3)
c_TwoCurve = _OID(ellipticCurve, 0)
c2pnb163v1 = _OID(c_TwoCurve, 1)
c2pnb163v2 = _OID(c_TwoCurve, 2)
c2pnb163v3 = _OID(c_TwoCurve, 3)
c2pnb176w1 = _OID(c_TwoCurve, 4)
c2tnb191v1 = _OID(c_TwoCurve, 5)
c2tnb191v2 = _OID(c_TwoCurve, 6)
c2tnb191v3 = _OID(c_TwoCurve, 7)
c2onb191v4 = _OID(c_TwoCurve, 8)
c2onb191v5 = _OID(c_TwoCurve, 9)
c2pnb208w1 = _OID(c_TwoCurve, 10)
c2tnb239v1 = _OID(c_TwoCurve, 11)
c2tnb239v2 = _OID(c_TwoCurve, 12)
c2tnb239v3 = _OID(c_TwoCurve, 13)
c2onb239v4 = _OID(c_TwoCurve, 14)
c2onb239v5 = _OID(c_TwoCurve, 15)
c2pnb272w1 = _OID(c_TwoCurve, 16)
c2pnb304w1 = _OID(c_TwoCurve, 17)
c2tnb359v1 = _OID(c_TwoCurve, 18)
c2pnb368w1 = _OID(c_TwoCurve, 19)
c2tnb431r1 = _OID(c_TwoCurve, 20)
primeCurve = _OID(ellipticCurve, 1)
prime192v1 = _OID(primeCurve, 1)
prime192v2 = _OID(primeCurve, 2)
prime192v3 = _OID(primeCurve, 3)
prime239v1 = _OID(primeCurve, 4)
prime239v2 = _OID(primeCurve, 5)
prime239v3 = _OID(primeCurve, 6)
prime256v1 = _OID(primeCurve, 7)
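# Hedged usage sketch, not part of the original RFC 3279 module: build and
# DER-encode an ECDSA signature value from hypothetical integers r and s.
def _example_encode_ecdsa_sig(r, s):
    from pyasn1.codec.der import encoder
    sig = ECDSA_Sig_Value()
    sig['r'] = r
    sig['s'] = s
    return encoder.encode(sig)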
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc3279.py
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_modules.rfc2459 import AlgorithmIdentifier
pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
md4WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.3')
md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
rsaOAEPEncryptionSET = univ.ObjectIdentifier('1.2.840.113549.1.1.6')
id_RSAES_OAEP = univ.ObjectIdentifier('1.2.840.113549.1.1.7')
id_mgf1 = univ.ObjectIdentifier('1.2.840.113549.1.1.8')
id_pSpecified = univ.ObjectIdentifier('1.2.840.113549.1.1.9')
id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26')
MAX = float('inf')
class Version(univ.Integer):
pass
class RSAPrivateKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
namedtype.NamedType('privateExponent', univ.Integer()),
namedtype.NamedType('prime1', univ.Integer()),
namedtype.NamedType('prime2', univ.Integer()),
namedtype.NamedType('exponent1', univ.Integer()),
namedtype.NamedType('exponent2', univ.Integer()),
namedtype.NamedType('coefficient', univ.Integer())
)
class RSAPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer())
)
# XXX defaults not set
class RSAES_OAEP_params(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('hashFunc', AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('maskGenFunc', AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('pSourceFunc', AlgorithmIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
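# Hedged usage sketch, not part of the original RFC 2437 module: parse a
# PKCS#1 DER-encoded RSAPrivateKey; `der_bytes` is a hypothetical, valid input.
def _example_decode_rsa_private_key(der_bytes):
    from pyasn1.codec.der import decoder
    key, _ = decoder.decode(der_bytes, asn1Spec=RSAPrivateKey())
    return int(key['modulus']), int(key['publicExponent'])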
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc2437.py
"""Classes for cloud/file references yielded by gsutil iterators."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
class BucketListingRef(object):
"""Base class for a reference to one fully expanded iterator result.
This allows polymorphic iteration over wildcard-iterated URLs. The
reference contains a fully expanded URL string containing no wildcards and
referring to exactly one entity (if a wildcard is contained, it is assumed
this is part of the raw string and should never be treated as a wildcard).
Each reference represents a Bucket, Object, or Prefix. For filesystem URLs,
Objects represent files and Prefixes represent directories.
The root_object member contains the underlying object as it was retrieved.
It is populated by the calling iterator, which may only request certain
fields to reduce the number of server requests.
For filesystem URLs, root_object is not populated.
"""
class _BucketListingRefType(object):
"""Enum class for describing BucketListingRefs."""
BUCKET = 'bucket' # Cloud bucket
OBJECT = 'object' # Cloud object or filesystem file
PREFIX = 'prefix' # Cloud bucket subdir or filesystem directory
@property
def url_string(self):
return self._url_string
@property
def type_name(self):
return self._ref_type
def IsBucket(self):
return self._ref_type == self._BucketListingRefType.BUCKET
def IsObject(self):
return self._ref_type == self._BucketListingRefType.OBJECT
def IsPrefix(self):
return self._ref_type == self._BucketListingRefType.PREFIX
def __str__(self):
return self._url_string
class BucketListingBucket(BucketListingRef):
"""BucketListingRef subclass for buckets."""
def __init__(self, storage_url, root_object=None):
"""Creates a BucketListingRef of type bucket.
Args:
storage_url: StorageUrl containing a bucket.
root_object: Underlying object metadata, if available.
"""
super(BucketListingBucket, self).__init__()
self._ref_type = self._BucketListingRefType.BUCKET
self._url_string = storage_url.url_string
self.storage_url = storage_url
self.root_object = root_object
class BucketListingPrefix(BucketListingRef):
"""BucketListingRef subclass for prefixes."""
def __init__(self, storage_url, root_object=None):
"""Creates a BucketListingRef of type prefix.
Args:
storage_url: StorageUrl containing a prefix.
root_object: Underlying object metadata, if available.
"""
super(BucketListingPrefix, self).__init__()
self._ref_type = self._BucketListingRefType.PREFIX
self._url_string = storage_url.url_string
self.storage_url = storage_url
self.root_object = root_object
class BucketListingObject(BucketListingRef):
"""BucketListingRef subclass for objects."""
def __init__(self, storage_url, root_object=None):
"""Creates a BucketListingRef of type object.
Args:
storage_url: StorageUrl containing an object.
root_object: Underlying object metadata, if available.
"""
super(BucketListingObject, self).__init__()
self._ref_type = self._BucketListingRefType.OBJECT
self._url_string = storage_url.url_string
self.storage_url = storage_url
self.root_object = root_object
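# Hedged usage sketch, not part of the original module: callers typically
# branch on the ref type while iterating. `refs` is assumed to be an iterable
# of BucketListingRef subclasses produced by a wildcard iterator.
def _example_partition_refs(refs):
  buckets, objects, prefixes = [], [], []
  for ref in refs:
    if ref.IsBucket():
      buckets.append(ref.url_string)
    elif ref.IsObject():
      objects.append(ref.url_string)
    else:
      prefixes.append(ref.url_string)
  return buckets, objects, prefixes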
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/bucket_listing_ref.py
"""File and Cloud URL representation classes."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import re
import stat
from gslib.exception import InvalidUrlError
from gslib.utils import system_util
from gslib.utils import text_util
# Matches provider strings of the form 'gs://'
PROVIDER_REGEX = re.compile(r'(?P<provider>[^:]*)://$')
# Matches bucket strings of the form 'gs://bucket'
BUCKET_REGEX = re.compile(r'(?P<provider>[^:]*)://(?P<bucket>[^/]*)/{0,1}$')
# Matches object strings of the form 'gs://bucket/obj'
OBJECT_REGEX = re.compile(
r'(?P<provider>[^:]*)://(?P<bucket>[^/]*)/(?P<object>.*)')
# Matches versioned object strings of the form 'gs://bucket/obj#1234'
GS_GENERATION_REGEX = re.compile(r'(?P<object>.+)#(?P<generation>[0-9]+)$')
# Matches versioned object strings of the form 's3://bucket/obj#NULL'
S3_VERSION_REGEX = re.compile(r'(?P<object>.+)#(?P<version_id>.+)$')
# Matches file strings of the form 'file://dir/filename'
FILE_OBJECT_REGEX = re.compile(r'([^:]*://)(?P<filepath>.*)')
# Regex to determine if a string contains any wildcards.
WILDCARD_REGEX = re.compile(r'[*?\[\]]')
class StorageUrl(object):
"""Abstract base class for file and Cloud Storage URLs."""
def Clone(self):
raise NotImplementedError('Clone not overridden')
def IsFileUrl(self):
raise NotImplementedError('IsFileUrl not overridden')
def IsCloudUrl(self):
raise NotImplementedError('IsCloudUrl not overridden')
def IsStream(self):
raise NotImplementedError('IsStream not overridden')
def IsFifo(self):
raise NotImplementedError('IsFifo not overridden')
def CreatePrefixUrl(self, wildcard_suffix=None):
"""Returns a prefix of this URL that can be used for iterating.
Args:
wildcard_suffix: If supplied, this wildcard suffix will be appended to the
prefix with a trailing slash before being returned.
Returns:
A prefix of this URL that can be used for iterating.
If this URL contains a trailing slash, it will be stripped to create the
prefix. This helps avoid infinite looping when prefixes are iterated, but
preserves other slashes so that objects with '/' in the name are handled
properly.
For example, when recursively listing a bucket with the following contents:
gs://bucket// <-- object named slash
gs://bucket//one-dir-deep
a top-level expansion with '/' as a delimiter will result in the following
URL strings:
'gs://bucket//' : OBJECT
'gs://bucket//' : PREFIX
If we right-strip all slashes from the prefix entry and add a wildcard
suffix, we will get 'gs://bucket/*' which will produce identical results
(and infinitely recurse).
Example return values:
('gs://bucket/subdir/', '*') becomes 'gs://bucket/subdir/*'
('gs://bucket/', '*') becomes 'gs://bucket/*'
('gs://bucket/', None) becomes 'gs://bucket'
('gs://bucket/subdir//', '*') becomes 'gs://bucket/subdir//*'
('gs://bucket/subdir///', '**') becomes 'gs://bucket/subdir///**'
('gs://bucket/subdir/', '*') where 'subdir/' is an object becomes
'gs://bucket/subdir/*', but iterating on this will return 'subdir/'
as a BucketListingObject, so we will not recurse on it as a subdir
during listing.
"""
raise NotImplementedError('CreatePrefixUrl not overridden')
@property
def url_string(self):
raise NotImplementedError('url_string not overridden')
@property
def versionless_url_string(self):
raise NotImplementedError('versionless_url_string not overridden')
def __eq__(self, other):
return isinstance(other, StorageUrl) and self.url_string == other.url_string
def __hash__(self):
return hash(self.url_string)
class _FileUrl(StorageUrl):
"""File URL class providing parsing and convenience methods.
This class assists with usage and manipulation of an
(optionally wildcarded) file URL string. Depending on the string
contents, this class represents one or more directories or files.
For File URLs, scheme is always file, bucket_name is always blank,
and object_name contains the file/directory path.
"""
def __init__(self, url_string, is_stream=False, is_fifo=False):
self.scheme = 'file'
self.delim = os.sep
self.bucket_name = ''
# If given a URI that starts with "<scheme>://", the object name should not
# include that prefix.
match = FILE_OBJECT_REGEX.match(url_string)
if match and match.lastindex == 2:
self.object_name = match.group(2)
else:
self.object_name = url_string
# On Windows, the pathname component separator is "\" instead of "/". If we
# find an occurrence of "/", replace it with "\" so that other logic can
# rely on being able to split pathname components on `os.sep`.
if system_util.IS_WINDOWS:
self.object_name = self.object_name.replace('/', '\\')
self.generation = None
self.is_stream = is_stream
self.is_fifo = is_fifo
def Clone(self):
return _FileUrl(self.url_string)
def IsFileUrl(self):
return True
def IsCloudUrl(self):
return False
def IsStream(self):
return self.is_stream
def IsFifo(self):
return self.is_fifo
def IsDirectory(self):
return (not self.IsStream() and not self.IsFifo() and
os.path.isdir(self.object_name))
def CreatePrefixUrl(self, wildcard_suffix=None):
return self.url_string
@property
def url_string(self):
return '%s://%s' % (self.scheme, self.object_name)
@property
def versionless_url_string(self):
return self.url_string
def __str__(self):
return self.url_string
class _CloudUrl(StorageUrl):
"""Cloud URL class providing parsing and convenience methods.
This class assists with usage and manipulation of an
(optionally wildcarded) cloud URL string. Depending on the string
contents, this class represents a provider, bucket(s), or object(s).
This class operates only on strings. No cloud storage API calls are
made from this class.
"""
def __init__(self, url_string):
self.scheme = None
self.delim = '/'
self.bucket_name = None
self.object_name = None
self.generation = None
provider_match = PROVIDER_REGEX.match(url_string)
bucket_match = BUCKET_REGEX.match(url_string)
if provider_match:
self.scheme = provider_match.group('provider')
elif bucket_match:
self.scheme = bucket_match.group('provider')
self.bucket_name = bucket_match.group('bucket')
else:
object_match = OBJECT_REGEX.match(url_string)
if object_match:
self.scheme = object_match.group('provider')
self.bucket_name = object_match.group('bucket')
self.object_name = object_match.group('object')
if self.object_name == '.' or self.object_name == '..':
raise InvalidUrlError('%s is an invalid root-level object name' %
self.object_name)
if self.scheme == 'gs':
generation_match = GS_GENERATION_REGEX.match(self.object_name)
if generation_match:
self.object_name = generation_match.group('object')
self.generation = generation_match.group('generation')
elif self.scheme == 's3':
version_match = S3_VERSION_REGEX.match(self.object_name)
if version_match:
self.object_name = version_match.group('object')
self.generation = version_match.group('version_id')
else:
raise InvalidUrlError(
'CloudUrl: URL string %s did not match URL regex' % url_string)
if url_string[(len(self.scheme) + len('://')):].startswith(self.delim):
raise InvalidUrlError(
'Cloud URL scheme should be followed by colon and two slashes: "://".'
' Found: "{}"'.format(url_string))
def Clone(self):
return _CloudUrl(self.url_string)
def IsFileUrl(self):
return False
def IsCloudUrl(self):
return True
def IsStream(self):
raise NotImplementedError('IsStream not supported on CloudUrl')
def IsFifo(self):
raise NotImplementedError('IsFifo not supported on CloudUrl')
def IsBucket(self):
return bool(self.bucket_name and not self.object_name)
def IsObject(self):
return bool(self.bucket_name and self.object_name)
def HasGeneration(self):
return bool(self.generation)
def IsProvider(self):
return bool(self.scheme and not self.bucket_name)
def CreatePrefixUrl(self, wildcard_suffix=None):
prefix = StripOneSlash(self.versionless_url_string)
if wildcard_suffix:
prefix = '%s/%s' % (prefix, wildcard_suffix)
return prefix
@property
def bucket_url_string(self):
return '%s://%s/' % (self.scheme, self.bucket_name)
@property
def url_string(self):
url_str = self.versionless_url_string
if self.HasGeneration():
url_str += '#%s' % self.generation
return url_str
@property
def versionless_url_string(self):
if self.IsProvider():
return '%s://' % self.scheme
elif self.IsBucket():
return self.bucket_url_string
return '%s://%s/%s' % (self.scheme, self.bucket_name, self.object_name)
def __str__(self):
return self.url_string
def GetSchemeFromUrlString(url_str):
"""Returns scheme component of a URL string."""
end_scheme_idx = url_str.find('://')
if end_scheme_idx == -1:
# File is the default scheme.
return 'file'
else:
return url_str[0:end_scheme_idx].lower()
def IsKnownUrlScheme(scheme_str):
return scheme_str in ('file', 's3', 'gs')
def _GetPathFromUrlString(url_str):
"""Returns path component of a URL string."""
end_scheme_idx = url_str.find('://')
if end_scheme_idx == -1:
return url_str
else:
return url_str[end_scheme_idx + 3:]
def ContainsWildcard(url_string):
"""Checks whether url_string contains a wildcard.
Args:
url_string: URL string to check.
Returns:
bool indicator.
"""
return bool(WILDCARD_REGEX.search(url_string))
def GenerationFromUrlAndString(url, generation):
"""Decodes a generation from a StorageURL and a generation string.
This is used to represent gs and s3 versioning.
Args:
url: StorageUrl representing the object.
generation: Long or string representing the object's generation or
version.
Returns:
Valid generation string for use in URLs.
"""
if url.scheme == 's3' and generation:
return text_util.DecodeLongAsString(generation)
return generation
def HaveFileUrls(args_to_check):
"""Checks whether args_to_check contain any file URLs.
Args:
args_to_check: Command-line argument subset to check.
Returns:
True if args_to_check contains any file URLs.
"""
for url_str in args_to_check:
storage_url = StorageUrlFromString(url_str)
if storage_url.IsFileUrl():
return True
return False
def HaveProviderUrls(args_to_check):
"""Checks whether args_to_check contains any provider URLs (like 'gs://').
Args:
args_to_check: Command-line argument subset to check.
Returns:
True if args_to_check contains any provider URLs.
"""
for url_str in args_to_check:
storage_url = StorageUrlFromString(url_str)
if storage_url.IsCloudUrl() and storage_url.IsProvider():
return True
return False
def IsCloudSubdirPlaceholder(url, blr=None):
"""Determines if a StorageUrl is a cloud subdir placeholder.
This function is needed because GUI tools (like the GCS cloud console) allow
users to create empty "folders" by creating a placeholder object; and parts
of gsutil need to treat those placeholder objects specially. For example,
gsutil rsync needs to avoid downloading those objects because they can cause
conflicts (see comments in rsync command for details).
We currently detect two cases:
- Cloud objects whose name ends with '_$folder$'
- Cloud objects whose name ends with '/'
Args:
url: (gslib.storage_url.StorageUrl) The URL to be checked.
blr: (gslib.BucketListingRef or None) The blr to check, or None if not
available. If `blr` is None, size won't be checked.
Returns:
(bool) True if the URL is a cloud subdir placeholder, otherwise False.
"""
if not url.IsCloudUrl():
return False
url_str = url.url_string
if url_str.endswith('_$folder$'):
return True
if blr and blr.IsObject():
size = blr.root_object.size
else:
size = 0
return size == 0 and url_str.endswith('/')
def IsFileUrlString(url_str):
"""Returns whether a string is a file URL."""
return GetSchemeFromUrlString(url_str) == 'file'
def StorageUrlFromString(url_str):
"""Static factory function for creating a StorageUrl from a string."""
scheme = GetSchemeFromUrlString(url_str)
if not IsKnownUrlScheme(scheme):
raise InvalidUrlError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
path = _GetPathFromUrlString(url_str)
is_stream = (path == '-')
is_fifo = False
try:
is_fifo = stat.S_ISFIFO(os.stat(path).st_mode)
except OSError:
pass
return _FileUrl(url_str, is_stream=is_stream, is_fifo=is_fifo)
return _CloudUrl(url_str)
def StripOneSlash(url_str):
if url_str and url_str.endswith('/'):
return url_str[:-1]
return url_str
def UrlsAreForSingleProvider(url_args):
"""Tests whether the URLs are all for a single provider.
Args:
url_args: (Iterable[str]) Collection of strings to check.
Returns:
True if all URLs are for single provider; False if `url_args` was empty (as
this would not result in a single unique provider) or URLs targeted multiple
unique providers.
"""
provider = None
url = None
for url_str in url_args:
url = StorageUrlFromString(url_str)
if not provider:
provider = url.scheme
elif url.scheme != provider:
return False
return provider is not None
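# Hedged usage sketch, not part of the original module: parsing a versioned
# gs:// URL exercises the generation-splitting logic above.
def _example_parse_versioned_url():
  url = StorageUrlFromString('gs://my-bucket/path/obj#1234567890')
  assert url.IsCloudUrl() and url.IsObject() and url.HasGeneration()
  assert url.bucket_name == 'my-bucket'
  assert url.object_name == 'path/obj'
  assert url.generation == '1234567890'
  return url.versionless_url_string  # 'gs://my-bucket/path/obj'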
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/storage_url.py
"""Iterator wrapper for checking wrapped iterator's emptiness or plurality."""
# TODO: Here and elsewhere (wildcard_iterator, name_expansion), do not reference
# __iter__ directly because it causes the first element to be instantiated.
# Instead, implement __iter__ as a return self and implement the next() function
# which returns (not yields) the values. This necessitates that in the case
# of the iterator classes, the iterator is used once per class instantiation
# so that next() calls do not collide, but this semantic has been long-assumed
# by the iterator classes for the use of __iter__ anyway.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import six
class PluralityCheckableIterator(six.Iterator):
"""Iterator wrapper class.
Allows you to check whether the wrapped iterator is empty and
whether it has more than 1 element. This iterator accepts three types of
values from the iterator it wraps:
1. A yielded element (this is the normal case).
2. A raised exception, which will be buffered and re-raised when it
is reached in this iterator.
3. A yielded tuple of (exception, stack trace), which will be buffered
and raised when it is reached in this iterator.
"""
def __init__(self, it):
# Need to get the iterator function here so that we don't immediately
# instantiate the first element (which could raise an exception).
self.orig_iterator = it
self.base_iterator = None
self.head = []
self.underlying_iter_empty = False
def _PopulateHead(self, num_elements=1):
"""Populates self.head from the underlying iterator.
Args:
num_elements: Populate until self.head contains this many
elements (or until the underlying iterator runs out).
Returns:
Number of elements at self.head after execution complete.
"""
while not self.underlying_iter_empty and len(self.head) < num_elements:
try:
if not self.base_iterator:
self.base_iterator = iter(self.orig_iterator)
e = next(self.base_iterator)
self.underlying_iter_empty = False
if isinstance(e, tuple) and isinstance(e[0], Exception):
self.head.append(('exception', e[0], e[1]))
else:
self.head.append(('element', e))
except StopIteration:
# Indicates we can no longer call next() on underlying iterator, but
# there could still be elements left to iterate in head.
self.underlying_iter_empty = True
except Exception as e: # pylint: disable=broad-except
# Buffer the exception and raise it when the element is accessed.
# Also, preserve the original stack trace, as the stack trace from
# within plurality_checkable_iterator.next is not very useful.
self.head.append(('exception', e, sys.exc_info()[2]))
return len(self.head)
def __iter__(self):
return self
def __next__(self):
if self._PopulateHead():
item_tuple = self.head.pop(0)
if item_tuple[0] == 'element':
return item_tuple[1]
else: # buffered exception
raise six.reraise(item_tuple[1].__class__, item_tuple[1], item_tuple[2])
raise StopIteration()
def IsEmpty(self):
return not self._PopulateHead()
def HasPlurality(self):
# Populate 2 elements (if possible) into head so we can check whether
# iterator has more than 1 item remaining.
return self._PopulateHead(num_elements=2) > 1
def PeekException(self):
"""Raises an exception if the first iterated element raised."""
if self._PopulateHead() and self.head[0][0] == 'exception':
exception_tuple = self.head[0]
raise six.reraise(exception_tuple[1].__class__, exception_tuple[1],
exception_tuple[2])
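# Hedged usage sketch, not part of the original module: wrap any iterable and
# inspect emptiness or plurality without consuming the buffered elements.
def _example_plurality_check(names):
  it = PluralityCheckableIterator(iter(names))
  if it.IsEmpty():
    return 'no results'
  if it.HasPlurality():
    return 'multiple results'
  return 'exactly one result: %s' % next(it)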
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/plurality_checkable_iterator.py
"""Contains classes related to argparse-based argument parsing."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from .tab_complete import CompleterType
class CommandArgument(object):
"""Argparse style argument."""
def __init__(self, *args, **kwargs):
"""Constructs an argparse argument with the given data.
See add_argument in argparse for description of the options.
The only deviation from the argparse arguments is the 'completer' parameter.
If 'completer' is present, it's used as the argcomplete completer for the
argument.
Args:
*args: Position args to pass to argparse add_argument
**kwargs: Named args to pass to argparse add_argument
"""
completer = None
if 'completer' in kwargs:
completer = kwargs['completer']
del kwargs['completer']
self.args = args
self.kwargs = kwargs
self.completer = completer
@staticmethod
def MakeZeroOrMoreCloudURLsArgument():
"""Constructs an argument that takes 0 or more Cloud URLs as parameters."""
return CommandArgument('file',
nargs='*',
completer=CompleterType.CLOUD_OBJECT)
@staticmethod
def MakeZeroOrMoreCloudBucketURLsArgument():
"""Constructs an argument that takes 0+ Cloud bucket URLs as parameters."""
return CommandArgument('file',
nargs='*',
completer=CompleterType.CLOUD_BUCKET)
@staticmethod
def MakeNCloudBucketURLsArgument(n):
"""Constructs an argument that takes N Cloud bucket URLs as parameters."""
return CommandArgument('file',
nargs=n,
completer=CompleterType.CLOUD_BUCKET)
@staticmethod
def MakeNCloudURLsArgument(n):
"""Constructs an argument that takes N Cloud URLs as parameters."""
return CommandArgument('file',
nargs=n,
completer=CompleterType.CLOUD_OBJECT)
@staticmethod
def MakeZeroOrMoreCloudOrFileURLsArgument():
"""Constructs an argument that takes 0 or more Cloud or File URLs."""
return CommandArgument('file',
nargs='*',
completer=CompleterType.CLOUD_OR_LOCAL_OBJECT)
@staticmethod
def MakeNCloudOrFileURLsArgument(n):
"""Constructs an argument that takes N Cloud or File URLs as parameters."""
return CommandArgument('file',
nargs=n,
completer=CompleterType.CLOUD_OR_LOCAL_OBJECT)
@staticmethod
def MakeZeroOrMoreFileURLsArgument():
"""Constructs an argument that takes 0 or more File URLs as parameters."""
return CommandArgument('file',
nargs='*',
completer=CompleterType.LOCAL_OBJECT)
@staticmethod
def MakeNFileURLsArgument(n):
"""Constructs an argument that takes N File URLs as parameters."""
return CommandArgument('file',
nargs=n,
completer=CompleterType.LOCAL_OBJECT)
@staticmethod
def MakeFileURLOrCannedACLArgument():
"""Constructs an argument that takes a File URL or a canned ACL."""
return CommandArgument('file',
nargs=1,
completer=CompleterType.LOCAL_OBJECT_OR_CANNED_ACL)
@staticmethod
def MakeFreeTextArgument():
"""Constructs an argument that takes arbitrary text."""
return CommandArgument('text', completer=CompleterType.NO_OP)
@staticmethod
def MakeOneOrMoreBindingsArgument():
"""Constructs an argument that takes multiple bindings."""
return CommandArgument('binding', nargs='+', completer=CompleterType.NO_OP)
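# Hedged usage sketch, not part of the original module: how a CommandArgument's
# stored args and kwargs map onto an argparse parser, with the completer kept
# on the resulting action for later resolution by tab-completion setup.
def _example_register_argument():
  import argparse
  parser = argparse.ArgumentParser()
  command_arg = CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()
  action = parser.add_argument(*command_arg.args, **command_arg.kwargs)
  if command_arg.completer:
    action.completer = command_arg.completer
  return parser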
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/command_argument.py
"""FilePart implementation for representing part of a file."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import io
class FilePart(io.IOBase):
"""Subclass of the file API for representing part of a file.
This class behaves as a contiguous subset of a given file (e.g., this object
will behave as though the desired part of the file was written to another
file, and the second file was opened).
"""
# pylint: disable=super-init-not-called
def __init__(self, filename, offset, length):
"""Initializes the FilePart.
Args:
filename: The name of the existing file, of which this object represents
a part.
offset: The position (in bytes) in the original file that corresponds to
the first byte of the FilePart.
length: The total number of bytes in the FilePart.
"""
self._fp = open(filename, 'rb')
self.length = length
self._start = offset
self._end = self._start + self.length
self._fp.seek(self._start)
def __enter__(self):
  # Return the instance so `with FilePart(...) as part` binds the part itself.
  return self
# pylint: disable=redefined-builtin
def __exit__(self, type, value, traceback):
self.close()
def tell(self):
return self._fp.tell() - self._start
def read(self, size=-1):
if size < 0:
size = self.length
size = min(size, self._end - self._fp.tell()) # Only read to our EOF
return self._fp.read(max(0, size))
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_END:
return self._fp.seek(offset + self._end)
elif whence == os.SEEK_CUR:
return self._fp.seek(offset, whence)
else:
return self._fp.seek(self._start + offset)
def close(self):
self._fp.close()
def flush(self, size=None):
raise NotImplementedError('flush is not implemented in FilePart.')
def fileno(self, size=None):
raise NotImplementedError('fileno is not implemented in FilePart.')
def isatty(self, size=None):
raise NotImplementedError('isatty is not implemented in FilePart.')
def next(self, size=None):
raise NotImplementedError('next is not implemented in FilePart.')
def readline(self, size=None):
raise NotImplementedError('readline is not implemented in FilePart.')
def readlines(self, size=None):
raise NotImplementedError('readlines is not implemented in FilePart.')
def xreadlines(self, size=None):
raise NotImplementedError('xreadlines is not implemented in FilePart.')
def truncate(self, size=None):
raise NotImplementedError('truncate is not implemented in FilePart.')
def write(self, size=None):
raise NotImplementedError('write is not implemented in FilePart.')
def writelines(self, size=None):
raise NotImplementedError('writelines is not implemented in FilePart.')
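# Hedged usage sketch, not part of the original module: read a 16-byte slice
# starting at byte offset 32, treating it as a standalone file-like object.
def _example_read_slice(filename):
  with FilePart(filename, offset=32, length=16) as part:
    return part.read()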
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/file_part.py
"""API map classes used with the CloudApiDelegator class."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.boto_translation import BotoTranslation
from gslib.gcs_json_api import GcsJsonApi
class ApiSelector(object):
"""Enum class for API."""
XML = 'XML'
JSON = 'JSON'
class ApiMapConstants(object):
"""Enum class for API map entries."""
API_MAP = 'apiclass'
SUPPORT_MAP = 'supported'
DEFAULT_MAP = 'default'
class GsutilApiClassMapFactory(object):
"""Factory for generating gsutil API class maps.
A valid class map is defined as:
{
(key) Provider prefix used in URI strings.
(value) {
(key) ApiSelector describing the API format.
(value) CloudApi child class that implements this API.
}
}
"""
@classmethod
def GetClassMap(cls):
"""Returns the default gsutil class map."""
gs_class_map = {
ApiSelector.XML: BotoTranslation,
ApiSelector.JSON: GcsJsonApi
}
s3_class_map = {
ApiSelector.XML: BotoTranslation,
}
class_map = {
'gs': gs_class_map,
's3': s3_class_map,
}
return class_map
class GsutilApiMapFactory(object):
"""Factory the generates the default gsutil API map.
The API map determines which Cloud API implementation is used for a given
command. A valid API map is defined as:
{
(key) ApiMapConstants.API_MAP : (value) Gsutil API class map (as
described in GsutilApiClassMapFactory comments).
(key) ApiMapConstants.SUPPORT_MAP : (value) {
(key) Provider prefix used in URI strings.
(value) list of ApiSelectors supported by the command for this provider.
}
(key) ApiMapConstants.DEFAULT_MAP : (value) {
(key) Provider prefix used in URI strings.
(value) Default ApiSelector for this command and provider.
}
}
"""
@classmethod
def GetApiMap(cls, gsutil_api_class_map_factory, support_map, default_map):
"""Creates a GsutilApiMap for use by the command from the inputs.
Args:
gsutil_api_class_map_factory: Factory defining a GetClassMap() function
adhering to GsutilApiClassMapFactory
semantics.
support_map: Entries for ApiMapConstants.SUPPORT_MAP as described above.
default_map: Entries for ApiMapConstants.DEFAULT_MAP as described above.
Returns:
GsutilApiMap generated from the inputs.
"""
return {
ApiMapConstants.API_MAP: gsutil_api_class_map_factory.GetClassMap(),
ApiMapConstants.SUPPORT_MAP: support_map,
ApiMapConstants.DEFAULT_MAP: default_map
}
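# Hedged usage sketch, not part of the original module: build the map a command
# might use, preferring JSON for gs and XML for s3.
def _example_build_api_map():
  support_map = {
      'gs': [ApiSelector.XML, ApiSelector.JSON],
      's3': [ApiSelector.XML],
  }
  default_map = {'gs': ApiSelector.JSON, 's3': ApiSelector.XML}
  return GsutilApiMapFactory.GetApiMap(GsutilApiClassMapFactory, support_map,
                                       default_map)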
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/cs_api_map.py
"""Threading code for estimating total work of long-running gsutil commands."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import threading
import time
from gslib import thread_message
from gslib.utils import constants
from gslib.utils import parallelism_framework_util
_PutToQueueWithTimeout = parallelism_framework_util.PutToQueueWithTimeout
class SeekAheadResult(object):
"""Result class for seek_ahead_iterator results.
A class is used instead of a namedtuple, making it easier to document
and use default keyword arguments.
"""
def __init__(self, est_num_ops=1, data_bytes=0):
"""Create a SeekAheadResult.
Args:
est_num_ops: Number of operations the iterated result represents.
Operation is loosely defined as a single API call for a single
object. The total number of API calls may not be known at the time of
iteration, so this number is approximate.
data_bytes: Number of data bytes that will be transferred (uploaded,
downloaded, or rewritten) for this iterated result.
"""
self.est_num_ops = est_num_ops
self.data_bytes = data_bytes
class SeekAheadThread(threading.Thread):
"""Thread to estimate total work to be performed by all processes and threads.
Because the ProducerThread can only buffer a certain number of tasks on the
global task queue, it cannot reliably provide the total count or size of
iterated results for operations involving many iterated arguments until it
nears the end of iteration.
This thread consumes an iterator that should be functionally identical
to the ProducerThread, but iterates to the end without adding tasks to the
global task queue in an effort to estimate the amount of total work that the
call to Apply will perform. It should be used only for large operations, and
thus it is created by the main ProducerThread only when the number of
iterated arguments exceeds a threshold.
This thread may produce an inaccurate estimate if its iterator produces
different results than the iterator used by the ProducerThread. This can
happen due to eventual listing consistency or due to the source being
modified as iteration occurs.
This thread estimates operations for top-level objects only;
sub-operations (such as a parallel composite upload) should be reported via
the iterator as a single object including the total number of bytes affected.
"""
def __init__(self, seek_ahead_iterator, cancel_event, status_queue):
"""Initializes the seek ahead thread.
Args:
seek_ahead_iterator: Iterator matching the ProducerThread's args_iterator,
but returning only object name and/or size in the result.
cancel_event: threading.Event for signaling the
seek-ahead iterator to terminate.
status_queue: Status queue for posting summary of fully iterated results.
"""
super(SeekAheadThread, self).__init__()
self.status_queue = status_queue
self.seek_ahead_iterator = seek_ahead_iterator
self.cancel_event = cancel_event
# For unit-testing only; use cancel_event to stop the thread.
self.terminate = False
self.start()
def run(self):
num_objects = 0
num_data_bytes = 0
try:
for seek_ahead_result in self.seek_ahead_iterator:
if self.terminate:
return
# Periodically check to see if the ProducerThread has actually
# completed, at which point providing an estimate is no longer useful.
if (num_objects % constants.NUM_OBJECTS_PER_LIST_PAGE) == 0:
if self.cancel_event.isSet():
return
num_objects += seek_ahead_result.est_num_ops
num_data_bytes += seek_ahead_result.data_bytes
except OSError as e:
# This can happen because the seek_ahead_iterator races with the command
# being run (e.g., a gsutil mv command, which iterates over and moves
# files while the estimator is concurrently iterating over the files to
# count them). If this happens return here without calling
# _PutToQueueWithTimeout, so we don't signal the UI thread that seek ahead
# work has completed its estimation. This will cause no estimate to be
# printed, but the command will continue to execute as usual.
return
if self.cancel_event.isSet():
return
_PutToQueueWithTimeout(
self.status_queue,
thread_message.SeekAheadMessage(num_objects, num_data_bytes,
time.time()))
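# Hedged usage sketch, not part of the original module: the thread starts
# itself in __init__, so the caller only needs to keep the cancel event to
# halt estimation once the real work finishes first.
def _example_start_seek_ahead(seek_ahead_iterator, status_queue):
  cancel_event = threading.Event()
  estimator = SeekAheadThread(seek_ahead_iterator, cancel_event, status_queue)
  return estimator, cancel_event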
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/seek_ahead_thread.py
"""Gsutil API for interacting with cloud storage providers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
class CloudApi(object):
"""Abstract base class for interacting with cloud storage providers.
Implementations of the gsutil Cloud API are not guaranteed to be thread-safe.
Behavior when calling a gsutil Cloud API instance simultaneously across
threads is undefined and doing so will likely cause errors. Therefore,
a separate instance of the gsutil Cloud API should be instantiated per-thread.
"""
def __init__(self,
bucket_storage_uri_class,
logger,
status_queue,
provider=None,
debug=0,
trace_token=None,
perf_trace_token=None,
user_project=None):
"""Performs necessary setup for interacting with the cloud storage provider.
Args:
bucket_storage_uri_class: boto storage_uri class, used by APIs that
provide boto translation or mocking.
logger: logging.logger for outputting log messages.
status_queue: Queue for relaying status to UI.
provider: Default provider prefix describing cloud storage provider to
connect to.
debug: Debug level for the API implementation (0..3).
trace_token: Google internal trace token to pass to the API
implementation (string).
perf_trace_token: Performance trace token to use when making API calls.
user_project: Project to be billed for this request.
"""
self.bucket_storage_uri_class = bucket_storage_uri_class
self.logger = logger
self.status_queue = status_queue
self.provider = provider
self.debug = debug
self.trace_token = trace_token
self.perf_trace_token = perf_trace_token
self.user_project = user_project
def GetServiceAccountId(self):
"""Returns the service account email id."""
raise NotImplementedError('GetServiceAccountId must be overridden.')
def GetBucket(self, bucket_name, provider=None, fields=None):
"""Gets Bucket metadata.
Args:
bucket_name: Name of the bucket.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Bucket metadata fields, for
example, ['logging', 'defaultObjectAcl']
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Bucket object.
"""
raise NotImplementedError('GetBucket must be overloaded')
def GetBucketIamPolicy(self, bucket_name, provider=None, fields=None):
"""Returns an IAM policy for the specified Bucket.
Args:
bucket_name: Name of the bucket.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only the IAM policy fields specified.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with the cloud storage providers.
Returns:
Policy object of the bucket.
"""
raise NotImplementedError('GetBucketIamPolicy must be overloaded')
def SetBucketIamPolicy(self, bucket_name, policy, provider=None):
"""Sets an IAM policy for the specified Bucket.
Args:
bucket_name: Name of the bucket.
policy: A Policy object describing the IAM policy.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with the cloud storage providers.
Returns:
Policy object of the bucket. May differ from input Policy.
"""
raise NotImplementedError('SetBucketIamPolicy must be overloaded')
def SignUrl(self, method, duration, path, generation, logger, region,
signed_headers, string_to_sign_debug):
"""Sign a url using service account's system managed private key.
Args:
method: The HTTP method to be used with the signed URL.
duration: timedelta for which the constructed signed URL should be valid.
path: String path to the bucket or object for signing, in the form
'bucket' or 'bucket/object'.
generation: If not None, specifies a version of an object for signing.
logger: logging.Logger for warning and debug output.
region: Geographic region in which the requested resource resides.
signed_headers: Dict containing the header info like host
content-type etc.
string_to_sign_debug: If true AND logger is enabled for debug level,
print string to sign to debug. Used to differentiate user's
signed URL from the probing permissions-check signed URL.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
CommandException for errors because of invalid account used for signing.
Returns:
The signed url.
"""
raise NotImplementedError('SignUrl must be overloaded')
def ListBuckets(self, project_id=None, provider=None, fields=None):
"""Lists bucket metadata for the given project.
Args:
project_id: Project owning the buckets, default from config if None.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these metadata fields for the listing,
for example:
['items/logging', 'items/defaultObjectAcl'].
Note that the WildcardIterator class should be used to list
buckets instead of calling this function directly. It amends
the fields definition from get-like syntax such as
['logging', 'defaultObjectAcl'] so that the caller does not
need to prepend 'items/' or specify fields necessary for listing
(like nextPageToken).
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Iterator over Bucket objects.
"""
raise NotImplementedError('ListBuckets must be overloaded')
def PatchBucket(self,
bucket_name,
metadata,
canned_acl=None,
canned_def_acl=None,
preconditions=None,
provider=None,
fields=None):
"""Updates bucket metadata for the bucket with patch semantics.
Args:
bucket_name: Name of bucket to update.
metadata: Bucket object defining metadata to be updated.
canned_acl: Canned ACL to apply to the bucket.
canned_def_acl: Canned default object ACL to apply to the bucket.
preconditions: Preconditions for the request.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Bucket metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Bucket object describing new bucket metadata.
"""
raise NotImplementedError('PatchBucket must be overloaded')
def LockRetentionPolicy(self, bucket_name, metageneration, provider=None):
"""Locks the Retention Policy on the bucket.
Args:
bucket_name: Name of bucket to update.
metageneration: Bucket metageneration to use as a precondition.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ServiceException for errors interacting with cloud storage providers.
Returns:
None
"""
raise NotImplementedError('LockRetentionPolicy must be overloaded')
def CreateBucket(self,
bucket_name,
project_id=None,
metadata=None,
provider=None,
fields=None):
"""Creates a new bucket with the specified metadata.
Args:
bucket_name: Name of the new bucket.
project_id: Project owner of the new bucket, default from config if None.
metadata: Bucket object defining new bucket metadata.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Bucket metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Bucket object describing new bucket metadata.
"""
raise NotImplementedError('CreateBucket must be overloaded')
def DeleteBucket(self, bucket_name, preconditions=None, provider=None):
"""Deletes a bucket.
Args:
bucket_name: Name of the bucket to delete.
preconditions: Preconditions for the request.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
raise NotImplementedError('DeleteBucket must be overloaded')
class CsObjectOrPrefixType(object):
"""Enum class for describing CsObjectOrPrefix types."""
OBJECT = 'object' # Cloud object
PREFIX = 'prefix' # Cloud bucket subdirectory
class CsObjectOrPrefix(object):
"""Container class for ListObjects results."""
def __init__(self, data, datatype):
"""Stores a ListObjects result.
Args:
data: Root object, either an apitools Object or a string Prefix.
datatype: CsObjectOrPrefixType of data.
"""
self.data = data
self.datatype = datatype
def ListObjects(self,
bucket_name,
prefix=None,
delimiter=None,
all_versions=None,
provider=None,
fields=None):
"""Lists objects (with metadata) and prefixes in a bucket.
Args:
bucket_name: Bucket containing the objects.
prefix: Prefix for directory-like behavior.
delimiter: Delimiter for directory-like behavior.
all_versions: If true, list all object versions.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these metadata fields for the listing,
for example:
['items/acl', 'items/updated', 'prefixes'].
Note that the WildcardIterator class should be used to list
objects instead of calling this function directly. It amends
the fields definition from get-like syntax such as
['acl', 'updated'] so that the caller does not need to
prepend 'items/' or specify any fields necessary for listing
(such as prefixes or nextPageToken).
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Iterator over CsObjectOrPrefix wrapper class.
"""
raise NotImplementedError('ListObjects must be overloaded')
def GetObjectIamPolicy(self,
bucket_name,
object_name,
generation=None,
provider=None,
fields=None):
"""Gets IAM policy for specified Object.
Args:
bucket_name: Bucket containing the object.
object_name: Name of the object.
generation: Generation of the object to retrieve.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only the IAM policy fields specified.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object IAM policy.
"""
raise NotImplementedError('GetObjectIamPolicy must be overloaded')
def SetObjectIamPolicy(self,
bucket_name,
object_name,
policy,
generation=None,
provider=None):
"""Sets IAM policy for specified Object.
Args:
bucket_name: Bucket containing the object.
object_name: Name of the object.
policy: IAM Policy object.
generation: Generation of the object to which the IAM policy will apply.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Policy object of the object. May differ from input Policy.
"""
raise NotImplementedError('SetObjectIamPolicy must be overloaded')
def GetObjectMetadata(self,
bucket_name,
object_name,
generation=None,
provider=None,
fields=None):
"""Gets object metadata.
If decryption is supported by the implementing class, this function will
read decryption keys from configuration and appropriately retry requests to
encrypted objects with the correct key.
Args:
bucket_name: Bucket containing the object.
object_name: Object name.
generation: Generation of the object to retrieve.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields, for
example, ['acl', 'updated'].
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object.
"""
raise NotImplementedError('GetObjectMetadata must be overloaded')
def PatchObjectMetadata(self,
bucket_name,
object_name,
metadata,
canned_acl=None,
generation=None,
preconditions=None,
provider=None,
fields=None):
"""Updates object metadata with patch semantics.
Args:
bucket_name: Bucket containing the object.
object_name: Object name for object.
metadata: Object object defining metadata to be updated.
canned_acl: Canned ACL to be set on the object.
generation: Generation (or version) of the object to update.
preconditions: Preconditions for the request.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Updated object metadata.
"""
raise NotImplementedError('PatchObjectMetadata must be overloaded')
class DownloadStrategy(object):
"""Enum class for specifying download strategy."""
ONE_SHOT = 'oneshot'
RESUMABLE = 'resumable'
# TODO: Change {de,en}cryption_tuple field names, as they don't actually
# accept tuples.
def GetObjectMedia(self,
bucket_name,
object_name,
download_stream,
provider=None,
generation=None,
object_size=None,
compressed_encoding=False,
download_strategy=DownloadStrategy.ONE_SHOT,
start_byte=0,
end_byte=None,
progress_callback=None,
serialization_data=None,
digesters=None,
decryption_tuple=None):
"""Gets object data.
Args:
bucket_name: Bucket containing the object.
object_name: Object name.
download_stream: Stream to send the object data to.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
generation: Generation of the object to retrieve.
object_size: Total size of the object being downloaded.
compressed_encoding: If true, object is stored with a compressed encoding.
download_strategy: Cloud API download strategy to use for download.
start_byte: Starting point for download (for resumable downloads and
range requests). Can be set to a negative value to request a
suffix range of bytes (the python equivalent of [-3:]).
end_byte: Ending byte number, inclusive, for download (for range
requests). If None, download the rest of the object.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
serialization_data: Implementation-specific JSON string of a dict
containing serialization information for the download.
digesters: Dict of {string : digester}, where string is a name of a hash
algorithm, and digester is a validation digester that supports
update(bytes) and digest() using that algorithm.
Implementation can set the digester value to None to indicate
bytes were not successfully digested on-the-fly.
decryption_tuple: Optional utils.encryption_helper.CryptoKeyWrapper for
decrypting an encrypted object.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Content-encoding string if it was detected that the server sent an encoded
object during transfer, None otherwise.
"""
raise NotImplementedError('GetObjectMedia must be overloaded')
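# Caller-side sketch (``api`` is assumed to be a concrete implementation):
# fetch only the last three bytes of an object by passing a negative
# start_byte, per the suffix-range semantics described above.
#
#     import io
#     buf = io.BytesIO()
#     api.GetObjectMedia('my-bucket', 'my-object', buf, start_byte=-3)
#     print(buf.getvalue())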
def UploadObject(self,
upload_stream,
object_metadata,
canned_acl=None,
size=None,
preconditions=None,
progress_callback=None,
encryption_tuple=None,
provider=None,
fields=None,
gzip_encoded=False):
"""Uploads object data and metadata.
Args:
upload_stream: Seekable stream of object data.
object_metadata: Object metadata for new object. Must include bucket
and object name.
canned_acl: Optional canned ACL to apply to object. Overrides ACL set
in object_metadata.
size: Optional object size.
preconditions: Preconditions for the request.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
encryption_tuple: Optional utils.encryption_helper.CryptoKeyWrapper for
encrypting the uploaded object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
gzip_encoded: Whether to use gzip transport encoding for the upload.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('UploadObject must be overloaded')
def UploadObjectStreaming(self,
upload_stream,
object_metadata,
canned_acl=None,
preconditions=None,
progress_callback=None,
encryption_tuple=None,
provider=None,
fields=None,
gzip_encoded=False):
"""Uploads object data and metadata.
Args:
upload_stream: Stream of object data. May not be seekable.
object_metadata: Object metadata for new object. Must include bucket
and object name.
canned_acl: Optional canned ACL to apply to object. Overrides ACL set
in object_metadata.
preconditions: Preconditions for the request.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size), but fills in only
bytes_transferred.
encryption_tuple: Optional utils.encryption_helper.CryptoKeyWrapper for
encrypting the uploaded object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
gzip_encoded: Whether to use gzip transport encoding for the upload.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('UploadObjectStreaming must be overloaded')
def UploadObjectResumable(self,
upload_stream,
object_metadata,
canned_acl=None,
size=None,
preconditions=None,
serialization_data=None,
tracker_callback=None,
progress_callback=None,
encryption_tuple=None,
provider=None,
fields=None,
gzip_encoded=False):
"""Uploads object data and metadata using a resumable upload strategy.
Args:
upload_stream: Seekable stream of object data.
object_metadata: Object metadata for new object. Must include bucket
and object name.
canned_acl: Optional canned ACL to apply to object. Overrides ACL set
in object_metadata.
size: Total size of the object.
preconditions: Preconditions for the request.
serialization_data: Dict of {'url' : UploadURL} allowing for uploads to
be resumed.
tracker_callback: Callback function taking an upload URL string.
Guaranteed to be called when the implementation gets an
upload URL, allowing the caller to resume the upload
across process breaks by saving the upload URL in
a tracker file.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
encryption_tuple: Optional utils.encryption_helper.CryptoKeyWrapper for
encrypting the uploaded object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields when the
upload is complete.
gzip_encoded: Whether to use gzip transport encoding for the upload.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('UploadObjectResumable must be overloaded')
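# Caller-side sketch: persist the upload URL handed to tracker_callback so
# the transfer can resume across process restarts. ``api``, ``stream`` and
# the tracker path are illustrative, not part of this module.
#
#     def save_upload_url(upload_url):
#         with open('/tmp/upload.tracker', 'w') as tracker_file:
#             tracker_file.write(upload_url)
#
#     api.UploadObjectResumable(stream, object_metadata, size=total_size,
#                               tracker_callback=save_upload_url)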
# TODO: Change {de,en}cryption_tuple field names, as they don't actually
# accept tuples.
def CopyObject(self,
src_obj_metadata,
dst_obj_metadata,
src_generation=None,
canned_acl=None,
preconditions=None,
progress_callback=None,
max_bytes_per_call=None,
encryption_tuple=None,
decryption_tuple=None,
provider=None,
fields=None):
"""Copies an object in the cloud.
Args:
src_obj_metadata: Object metadata for source object. Must include
bucket name, object name, and etag.
dst_obj_metadata: Object metadata for new object. Must include bucket
and object name.
src_generation: Generation of the source object to copy.
canned_acl: Optional canned ACL to apply to destination object. Overrides
ACL set in dst_obj_metadata.
preconditions: Destination object preconditions for the request.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
max_bytes_per_call: Integer describing maximum number of bytes
to rewrite per service call.
encryption_tuple: Optional utils.encryption_helper.CryptoKeyWrapper for
encrypting the destination object.
decryption_tuple: Optional utils.encryption_helper.CryptoKeyWrapper for
decrypting the source object. If supplied without
encryption_tuple, destination object will be written
without customer-supplied encryption.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('CopyObject must be overloaded')
def ComposeObject(self,
src_objs_metadata,
dst_obj_metadata,
preconditions=None,
encryption_tuple=None,
provider=None,
fields=None):
"""Composes an object in the cloud.
Args:
src_objs_metadata: List of ComposeRequest.SourceObjectsValueListEntries
specifying the objects to compose.
dst_obj_metadata: Metadata for the destination object including bucket
and object name.
preconditions: Destination object preconditions for the request.
encryption_tuple: Optional utils.encryption_helper.CryptoKeyWrapper for
decrypting source objects and encrypting the destination
object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Composed object metadata.
"""
raise NotImplementedError('ComposeObject must be overloaded')
def DeleteObject(self,
bucket_name,
object_name,
preconditions=None,
generation=None,
provider=None):
"""Deletes an object.
Args:
bucket_name: Name of the containing bucket.
object_name: Name of the object to delete.
preconditions: Preconditions for the request.
generation: Generation (or version) of the object to delete; if None,
deletes the live object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
raise NotImplementedError('DeleteObject must be overloaded')
def WatchBucket(self,
bucket_name,
address,
channel_id,
token=None,
provider=None,
fields=None):
"""Creates a notification subscription for changes to objects in a bucket.
Args:
bucket_name: Bucket containing the objects.
address: Address to which to send notifications.
channel_id: Unique ID string for the channel.
token: If present, token string is delivered with each notification.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Channel metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Channel object describing the notification subscription.
"""
raise NotImplementedError('WatchBucket must be overloaded')
def StopChannel(self, channel_id, resource_id, provider=None):
"""Stops a notification channel.
Args:
channel_id: Unique ID string for the channel.
resource_id: Version-agnostic ID string for the channel.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
raise NotImplementedError('StopChannel must be overloaded')
def ListChannels(self, bucket_name, provider=None):
"""Lists object change notifications for a bucket.
Args:
bucket_name: Bucket containing the objects.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
raise NotImplementedError('ListChannels must be overloaded')
def GetProjectServiceAccount(self, project_number, provider=None):
"""Get the GCS-owned service account representing this project.
Args:
project_number: the project in question.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ServiceException for errors interacting with cloud storage providers.
Returns:
Service account object (with email_address string field)
"""
raise NotImplementedError('GetProjectServiceAccount must be overloaded')
def CreateNotificationConfig(self,
bucket_name,
pubsub_topic,
payload_format,
event_types=None,
custom_attributes=None,
object_name_prefix=None,
provider=None):
"""Creates a new notification with the specified parameters.
Args:
bucket_name: (Required) Name of the bucket.
pubsub_topic: (Required) Cloud Pub/Sub topic to which to publish.
payload_format: (Required) payload format, must be 'JSON' or 'NONE'.
event_types: (Opt) List of event type filters, e.g. 'OBJECT_FINALIZE'.
custom_attributes: (Opt) Dictionary of custom attributes.
object_name_prefix: (Opt) Filter on object name.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Notification object describing new notificationConfig
"""
raise NotImplementedError('CreateNotificationConfig must be overloaded')
def DeleteNotificationConfig(self, bucket_name, notification, provider=None):
"""Deletes a notification.
Args:
bucket_name: (Required) Name of the bucket.
notification: (Required) Integer ID of the notification.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None
"""
raise NotImplementedError('DeleteNotificationConfig must be overloaded')
def ListNotificationConfigs(self, bucket_name, provider=None):
"""Lists notification configs in a bucket.
Args:
bucket_name: Name of the bucket.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Yields:
List of notification objects.
"""
raise NotImplementedError('ListNotificationConfigs must be overloaded')
def CreateHmacKey(self, project_id, service_account_email):
"""Creates a new HMAC key for the specified service account.
Args:
project_id: Project ID owning the service account for which the key is to
be created.
service_account_email: Email address of the service account for which to
create a key.
Raises:
NotImplementedError: not implemented TODO(tuckerkirven)
Returns:
The key metadata and the secret key material.
"""
raise NotImplementedError('CreateHmacKey must be overloaded')
def DeleteHmacKey(self, project_id, access_id):
"""Deletes an HMAC key.
Args:
project_id: Project ID owning the requested key.
access_id: Name of the HMAC key to be deleted.
Raises:
NotImplementedError: not implemented TODO(tuckerkirven)
"""
raise NotImplementedError('DeleteHmacKey must be overloaded')
def GetHmacKey(self, project_id, access_id):
"""Retrieves an HMAC key's metadata.
Args:
project_id: Project ID owning the service account of the requested key.
access_id: Name of the HMAC key for which the metadata is being requested.
Raises:
NotImplementedError: not implemented TODO(tuckerkirven)
Returns:
Metadata for the specified key.
"""
raise NotImplementedError('GetHmacKey must be overloaded')
def ListHmacKeys(self,
project_id,
service_account_email,
show_deleted_keys=False):
"""Lists HMAC keys matching the criteria.
Args:
project_id: Name of the project from which to list HMAC keys.
service_account_email: If present, only keys for the given service
account will be returned.
show_deleted_keys: If set, show keys in the DELETED state.
Raises:
NotImplementedError: not implemented TODO(tuckerkirven)
Yields:
List of HMAC key metadata objects.
"""
raise NotImplementedError('ListHmacKeys must be overloaded')
def UpdateHmacKey(self, project_id, access_id, state, etag):
"""Updates the state of an HMAC key.
Args:
project_id: Project ID owning the service account of the updated key.
access_id: Name of the HMAC key being updated.
state: The state to which the key should be updated.
etag: None or a string matching the key's etag to ensure the appropriate
version of the key is updated.
Raises:
NotImplementedError: not implemented TODO(tuckerkirven)
Returns:
The updated key metadata.
"""
raise NotImplementedError('UpdateHmacKey must be overloaded')
class Preconditions(object):
"""Preconditions class for specifying preconditions to cloud API requests."""
def __init__(self, gen_match=None, meta_gen_match=None):
"""Instantiates a Preconditions object.
Args:
gen_match: Perform request only if generation of target object
matches the given integer. Ignored for bucket requests.
meta_gen_match: Perform request only if metageneration of target
object/bucket matches the given integer.
"""
self.gen_match = gen_match
self.meta_gen_match = meta_gen_match
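# Usage sketch: Preconditions is a plain value container that callers pass
# to methods such as DeleteObject or PatchObjectMetadata. For example, to
# make a delete succeed only while the live object generation is still
# 12345 (a compare-and-swap style guard; ``api`` is illustrative):
#
#     preconditions = Preconditions(gen_match=12345)
#     api.DeleteObject('my-bucket', 'my-object', preconditions=preconditions)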
class EncryptionException(Exception):
"""Exception raised when an encrypted resource cannot be decrypted."""
class ArgumentException(Exception):
"""Exception raised when arguments to a Cloud API method are invalid.
This exception is never raised as a result of a failed call to a cloud
storage provider.
"""
def __init__(self, reason):
Exception.__init__(self)
self.reason = reason
def __repr__(self):
return str(self)
def __str__(self):
return '%s: %s' % (self.__class__.__name__, self.reason)
class ProjectIdException(ArgumentException):
"""Exception raised when a Project ID argument is required but not present."""
class ServiceException(Exception):
"""Exception raised when a cloud storage provider request fails.
This exception is raised only as a result of a failed remote call.
"""
def __init__(self, reason, status=None, body=None):
Exception.__init__(self)
self.reason = reason
self.status = status
self.body = body
def __repr__(self):
return str(self)
def __str__(self):
message = '%s:' % self.__class__.__name__
if self.status:
message += ' %s' % self.status
message += ' %s' % self.reason
if self.body:
message += '\n%s' % self.body
return message
class RetryableServiceException(ServiceException):
"""Exception class for retryable exceptions."""
class ResumableDownloadException(RetryableServiceException):
"""Exception raised for resumable downloads that can be retried later."""
class ResumableUploadException(RetryableServiceException):
"""Exception raised for resumable uploads that can be retried with the same upload ID."""
class ResumableUploadStartOverException(RetryableServiceException):
"""Exception raised for resumable uploads that can be retried with a new upload ID."""
class ResumableUploadAbortException(ServiceException):
"""Exception raised for resumable uploads that cannot be retried later."""
class AuthenticationException(ServiceException):
"""Exception raised for errors during the authentication process."""
class PreconditionException(ServiceException):
"""Exception raised for precondition failures."""
class NotFoundException(ServiceException):
"""Exception raised when a resource is not found (404)."""
class BucketNotFoundException(NotFoundException):
"""Exception raised when a bucket resource is not found (404)."""
def __init__(self, reason, bucket_name, status=None, body=None):
super(BucketNotFoundException, self).__init__(reason,
status=status,
body=body)
self.bucket_name = bucket_name
class NotEmptyException(ServiceException):
"""Exception raised when trying to delete a bucket is not empty."""
class BadRequestException(ServiceException):
"""Exception raised for malformed requests.
Where it is possible to detect invalid arguments prior to sending them
to the server, an ArgumentException should be raised instead.
"""
class AccessDeniedException(ServiceException):
"""Exception raised when authenticated user has insufficient access rights.
This is raised when the authentication process succeeded but the
authenticated user does not have access rights to the requested resource.
"""
class PublishPermissionDeniedException(ServiceException):
"""Exception raised when bucket does not have publish permission to a topic.
This is raised when a customer attempts to set up a notification config to a
Cloud Pub/Sub topic, but their GCS bucket does not have permission to
publish to the specified topic.
"""
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/cloud_api.py (source: pypi) ----
"""pyCrypto Crypto-related routines for oauth2client."""
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Util.asn1 import DerSequence
from oauth2client import _helpers
class PyCryptoVerifier(object):
"""Verifies the signature on a message."""
def __init__(self, pubkey):
"""Constructor.
Args:
pubkey: OpenSSL.crypto.PKey (or equiv), The public key to verify
with.
"""
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return PKCS1_v1_5.new(self._pubkey).verify(
SHA256.new(message), signature)
@staticmethod
def from_string(key_pem, is_x509_cert):
"""Construct a Verified instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
Verifier instance.
"""
if is_x509_cert:
key_pem = _helpers._to_bytes(key_pem)
pemLines = key_pem.replace(b' ', b'').split()
certDer = _helpers._urlsafe_b64decode(b''.join(pemLines[1:-1]))
certSeq = DerSequence()
certSeq.decode(certDer)
tbsSeq = DerSequence()
tbsSeq.decode(certSeq[0])
pubkey = RSA.importKey(tbsSeq[6])
else:
pubkey = RSA.importKey(key_pem)
return PyCryptoVerifier(pubkey)
class PyCryptoSigner(object):
"""Signs messages with a private key."""
def __init__(self, pkey):
"""Constructor.
Args:
pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
"""
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: string, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
@staticmethod
def from_string(key, password='notasecret'):
"""Construct a Signer instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
Signer instance.
Raises:
NotImplementedError if the key isn't in PEM format.
"""
parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))
if parsed_pem_key:
pkey = RSA.importKey(parsed_pem_key)
else:
raise NotImplementedError(
'No key in PEM format was detected. This implementation '
'can only use the PyCrypto library for keys in PEM '
'format.')
return PyCryptoSigner(pkey)
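if __name__ == '__main__':
    # Minimal self-contained sketch (assumes the PyCrypto package is
    # installed; the key and message are throwaway test values): sign a
    # message with PyCryptoSigner, then check it with PyCryptoVerifier.
    test_key = RSA.generate(2048)
    signer = PyCryptoSigner(test_key)
    signature = signer.sign('hello world')
    verifier = PyCryptoVerifier(test_key.publickey())
    assert verifier.verify('hello world', signature)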
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/oauth2client/oauth2client/_pycrypto_crypt.py (source: pypi) ----
import base64
import functools
import inspect
import json
import logging
import os
import warnings
import six
from six.moves import urllib
logger = logging.getLogger(__name__)
POSITIONAL_WARNING = 'WARNING'
POSITIONAL_EXCEPTION = 'EXCEPTION'
POSITIONAL_IGNORE = 'IGNORE'
POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
POSITIONAL_IGNORE])
positional_parameters_enforcement = POSITIONAL_WARNING
_SYM_LINK_MESSAGE = 'File: {0}: Is a symbolic link.'
_IS_DIR_MESSAGE = '{0}: Is a directory'
_MISSING_FILE_MESSAGE = 'Cannot access {0}: No such file or directory'
def positional(max_positional_args):
"""A decorator to declare that only the first N arguments my be positional.
This decorator makes it easy to support Python 3 style keyword-only
parameters. For example, in Python 3 it is possible to write::
def fn(pos1, *, kwonly1=None, kwonly2=None):
...
All named parameters after ``*`` must be a keyword::
fn(10, 'kw1', 'kw2') # Raises exception.
fn(10, kwonly1='kw1') # Ok.
Example
^^^^^^^
To define a function like above, do::
@positional(1)
def fn(pos1, kwonly1=None, kwonly2=None):
...
If no default value is provided to a keyword argument, it becomes a
required keyword argument::
@positional(0)
def fn(required_kw):
...
This must be called with the keyword parameter::
fn() # Raises exception.
fn(10) # Raises exception.
fn(required_kw=10) # Ok.
When defining instance or class methods always remember to account for
``self`` and ``cls``::
class MyClass(object):
@positional(2)
def my_method(self, pos1, kwonly1=None):
...
@classmethod
@positional(2)
def my_method(cls, pos1, kwonly1=None):
...
The positional decorator behavior is controlled by
``_helpers.positional_parameters_enforcement``, which may be set to
``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
nothing, respectively, if a declaration is violated.
Args:
max_positional_args: Maximum number of positional arguments. All
parameters after this index must be
keyword only.
Returns:
A decorator that prevents using arguments after max_positional_args
from being used as positional parameters.
Raises:
TypeError: if a key-word only argument is provided as a positional
parameter, but only if
_helpers.positional_parameters_enforcement is set to
POSITIONAL_EXCEPTION.
"""
def positional_decorator(wrapped):
@functools.wraps(wrapped)
def positional_wrapper(*args, **kwargs):
if len(args) > max_positional_args:
plural_s = ''
if max_positional_args != 1:
plural_s = 's'
message = ('{function}() takes at most {args_max} positional '
'argument{plural} ({args_given} given)'.format(
function=wrapped.__name__,
args_max=max_positional_args,
args_given=len(args),
plural=plural_s))
if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
raise TypeError(message)
elif positional_parameters_enforcement == POSITIONAL_WARNING:
logger.warning(message)
return wrapped(*args, **kwargs)
return positional_wrapper
if isinstance(max_positional_args, six.integer_types):
return positional_decorator
else:
args, _, _, defaults = inspect.getargspec(max_positional_args)
return positional(len(args) - len(defaults))(max_positional_args)
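# Behavior sketch for the decorator above, with enforcement set to
# POSITIONAL_EXCEPTION (the module default is POSITIONAL_WARNING):
#
#     @positional(1)
#     def fn(pos1, kwonly1=None):
#         ...
#
#     fn(10, kwonly1='kw')  # Ok: one positional argument.
#     fn(10, 'kw')          # Raises TypeError: two positional arguments.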
def scopes_to_string(scopes):
"""Converts scope value to a string.
If scopes is a string then it is simply passed through. If scopes is an
iterable then a string is returned that is all the individual scopes
concatenated with spaces.
Args:
scopes: string or iterable of strings, the scopes.
Returns:
The scopes formatted as a single string.
"""
if isinstance(scopes, six.string_types):
return scopes
else:
return ' '.join(scopes)
def string_to_scopes(scopes):
"""Converts stringifed scope value to a list.
If scopes is a list then it is simply passed through. If scopes is an
string then a list of each individual scope is returned.
Args:
scopes: a string or iterable of strings, the scopes.
Returns:
The scopes in a list.
"""
if not scopes:
return []
elif isinstance(scopes, six.string_types):
return scopes.split(' ')
else:
return scopes
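# Round-trip sketch for the two scope helpers above:
#
#     scopes_to_string(['a', 'b'])  # -> 'a b'
#     string_to_scopes('a b')       # -> ['a', 'b']
#     string_to_scopes('')          # -> []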
def parse_unique_urlencoded(content):
"""Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
"""
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for key, value in six.iteritems(urlencoded_params):
if len(value) != 1:
msg = ('URL-encoded content contains a repeated value:'
'%s -> %s' % (key, ', '.join(value)))
raise ValueError(msg)
params[key] = value[0]
return params
def update_query_params(uri, params):
"""Updates a URI with new query parameters.
If a given key from ``params`` is repeated in the ``uri``, then
the URI will be considered invalid and an error will occur.
If the URI is valid, then each value from ``params`` will
replace the corresponding value in the query parameters (if
it exists).
Args:
uri: string, A valid URI, with potential existing query parameters.
params: dict, A dictionary of query parameters.
Returns:
The same URI but with the new query parameters added.
"""
parts = urllib.parse.urlparse(uri)
query_params = parse_unique_urlencoded(parts.query)
query_params.update(params)
new_query = urllib.parse.urlencode(query_params)
new_parts = parts._replace(query=new_query)
return urllib.parse.urlunparse(new_parts)
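# Sketch (illustrative URI): adding a parameter preserves existing ones,
# and an existing key's value is replaced rather than repeated.
#
#     update_query_params('https://example.com/path?foo=1', {'bar': '2'})
#     # -> 'https://example.com/path?foo=1&bar=2'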
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
Updated query parameter. Does not update the url if value is None.
"""
if value is None:
return url
else:
return update_query_params(url, {name: value})
def validate_file(filename):
if os.path.islink(filename):
raise IOError(_SYM_LINK_MESSAGE.format(filename))
elif os.path.isdir(filename):
raise IOError(_IS_DIR_MESSAGE.format(filename))
elif not os.path.isfile(filename):
warnings.warn(_MISSING_FILE_MESSAGE.format(filename))
def _parse_pem_key(raw_key_input):
"""Identify and extract PEM keys.
Determines whether the given key is in the format of PEM key, and extracts
the relevant part of the key if it is.
Args:
raw_key_input: The contents of a private key file (either PEM or
PKCS12).
Returns:
string, The actual key if the contents are from a PEM file, or
else None.
"""
offset = raw_key_input.find(b'-----BEGIN ')
if offset != -1:
return raw_key_input[offset:]
def _json_encode(data):
return json.dumps(data, separators=(',', ':'))
def _to_bytes(value, encoding='ascii'):
"""Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python2 it does not modify ``unicode`` objects.
Args:
value: The string/bytes value to be converted.
encoding: The encoding to use to convert unicode to bytes. Defaults
to "ascii", which will not allow any characters from ordinals
larger than 127. Other useful values are "latin-1", which
only allows byte ordinals up to 255, and "utf-8", which
can encode any unicode code point.
Returns:
The original value converted to bytes (if unicode) or as passed in
if it started out as bytes.
Raises:
ValueError if the value could not be converted to bytes.
"""
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
raise ValueError('{0!r} could not be converted to bytes'.format(value))
def _from_bytes(value):
"""Converts bytes to a string value, if necessary.
Args:
value: The string/bytes value to be converted.
Returns:
The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError if the value could not be converted to unicode.
"""
result = (value.decode('utf-8')
if isinstance(value, six.binary_type) else value)
if isinstance(result, six.text_type):
return result
else:
raise ValueError(
'{0!r} could not be converted to unicode'.format(value))
def _urlsafe_b64encode(raw_bytes):
raw_bytes = _to_bytes(raw_bytes, encoding='utf-8')
return base64.urlsafe_b64encode(raw_bytes).rstrip(b'=')
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = _to_bytes(b64string)
padded = b64string + b'=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
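if __name__ == '__main__':
    # Padding round-trip sketch for the helpers above: encoding strips the
    # '=' padding and decoding restores it before delegating to base64.
    encoded = _urlsafe_b64encode('data to encode')
    assert b'=' not in encoded
    assert _urlsafe_b64decode(encoded) == b'data to encode'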
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/oauth2client/oauth2client/_helpers.py (source: pypi) ----
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from oauth2client import _helpers
_PKCS12_ERROR = r"""\
PKCS12 format is not supported by the RSA library.
Either install PyOpenSSL, or please convert .p12 format
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:start + 8]
char_val = sum(val * digit
for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
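# Sketch: sixteen bits collapse to two bytes, most significant bit first.
#
#     _bit_list_to_bytes([0, 1, 0, 0, 0, 0, 0, 1,    # 0x41 -> b'A'
#                         0, 1, 0, 0, 0, 0, 1, 0])   # 0x42 -> b'B'
#     # -> b'AB'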
class RsaVerifier(object):
"""Verifies the signature on a message.
Args:
pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
"""
def __init__(self, pubkey):
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If
string, will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, key_pem, is_x509_cert):
"""Construct an RsaVerifier instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
RsaVerifier instance.
Raises:
ValueError: if the key_pem can't be parsed. In either case, the
error will begin with 'No PEM start marker'. If
``is_x509_cert`` is True, parsing fails to find the
"-----BEGIN CERTIFICATE-----" marker; otherwise it fails
to find "-----BEGIN RSA PUBLIC KEY-----".
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)
cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
else:
pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
return cls(pubkey)
class RsaSigner(object):
"""Signs messages with a private key.
Args:
pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
"""
def __init__(self, pkey):
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return rsa.pkcs1.sign(message, self._key, 'SHA-256')
@classmethod
def from_string(cls, key, password='notasecret'):
"""Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
key = _helpers._from_bytes(key) # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if marker_id == 0:
pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
format='DER')
elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
format='DER')
else:
raise ValueError('No key could be detected.')
return cls(pkey)
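if __name__ == '__main__':
    # Minimal sketch (assumes the pure-python ``rsa`` package is installed;
    # 512 bits is a throwaway test size, far too small for real use): sign
    # with RsaSigner and verify with RsaVerifier.
    pub_key, priv_key = rsa.newkeys(512)
    signature = RsaSigner(priv_key).sign(b'hello')
    assert RsaVerifier(pub_key).verify(b'hello', signature)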
# ---- end of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/oauth2client/oauth2client/_pure_python_crypt.py (source: pypi) ----
import base64
import copy
import datetime
import json
import time
import oauth2client
from oauth2client import _helpers
from oauth2client import client
from oauth2client import crypt
from oauth2client import transport
_PASSWORD_DEFAULT = 'notasecret'
_PKCS12_KEY = '_private_key_pkcs12'
_PKCS12_ERROR = r"""
This library only implements PKCS#12 support via the pyOpenSSL library.
Either install pyOpenSSL, or please convert the .p12 file
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
class ServiceAccountCredentials(client.AssertionCredentials):
"""Service Account credential for OAuth 2.0 signed JWT grants.
Supports
* JSON keyfile (typically contains a PKCS8 key stored as
PEM text)
* ``.p12`` key (stores PKCS12 key and certificate)
Makes an assertion to server using a signed JWT assertion in exchange
for an access token.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
Args:
service_account_email: string, The email associated with the
service account.
signer: ``crypt.Signer``, A signer which can be used to sign content.
scopes: List or string, (Optional) Scopes to use when acquiring
an access token.
private_key_id: string, (Optional) Private key identifier. Typically
only used with a JSON keyfile. Can be sent in the
header of a JWT token assertion.
client_id: string, (Optional) Client ID for the project that owns the
service account.
user_agent: string, (Optional) User agent to use when sending
request.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
kwargs: dict, Extra key-value pairs (both strings) to send in the
payload body when making an assertion.
"""
MAX_TOKEN_LIFETIME_SECS = 3600
"""Max lifetime of the token (one hour, in seconds)."""
NON_SERIALIZED_MEMBERS = (
frozenset(['_signer']) |
client.AssertionCredentials.NON_SERIALIZED_MEMBERS)
"""Members that aren't serialized when object is converted to JSON."""
# Can be over-ridden by factory constructors. Used for
# serialization/deserialization purposes.
_private_key_pkcs8_pem = None
_private_key_pkcs12 = None
_private_key_password = None
def __init__(self,
service_account_email,
signer,
scopes='',
private_key_id=None,
client_id=None,
user_agent=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
**kwargs):
super(ServiceAccountCredentials, self).__init__(
None, user_agent=user_agent, token_uri=token_uri,
revoke_uri=revoke_uri)
self._service_account_email = service_account_email
self._signer = signer
self._scopes = _helpers.scopes_to_string(scopes)
self._private_key_id = private_key_id
self.client_id = client_id
self._user_agent = user_agent
self._kwargs = kwargs
def _to_json(self, strip, to_serialize=None):
"""Utility function that creates JSON repr. of a credentials object.
Over-ride is needed since PKCS#12 keys will not in general be JSON
serializable.
Args:
strip: array, An array of names of members to exclude from the
JSON.
to_serialize: dict, (Optional) The properties for this object
that will be serialized. This allows callers to
modify before serializing.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
if to_serialize is None:
to_serialize = copy.copy(self.__dict__)
pkcs12_val = to_serialize.get(_PKCS12_KEY)
if pkcs12_val is not None:
to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val)
return super(ServiceAccountCredentials, self)._to_json(
strip, to_serialize=to_serialize)
@classmethod
def _from_parsed_json_keyfile(cls, keyfile_dict, scopes,
token_uri=None, revoke_uri=None):
"""Helper for factory constructors from JSON keyfile.
Args:
keyfile_dict: dict-like object, The parsed dictionary-like object
containing the contents of the JSON keyfile.
scopes: List or string, Scopes to use when acquiring an
access token.
token_uri: string, URI for OAuth 2.0 provider token endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile contents.
Raises:
ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
KeyError, if one of the expected keys is not present in
the keyfile.
"""
creds_type = keyfile_dict.get('type')
if creds_type != client.SERVICE_ACCOUNT:
raise ValueError('Unexpected credentials type', creds_type,
'Expected', client.SERVICE_ACCOUNT)
service_account_email = keyfile_dict['client_email']
private_key_pkcs8_pem = keyfile_dict['private_key']
private_key_id = keyfile_dict['private_key_id']
client_id = keyfile_dict['client_id']
if not token_uri:
token_uri = keyfile_dict.get('token_uri',
oauth2client.GOOGLE_TOKEN_URI)
if not revoke_uri:
revoke_uri = keyfile_dict.get('revoke_uri',
oauth2client.GOOGLE_REVOKE_URI)
signer = crypt.Signer.from_string(private_key_pkcs8_pem)
credentials = cls(service_account_email, signer, scopes=scopes,
private_key_id=private_key_id,
client_id=client_id, token_uri=token_uri,
revoke_uri=revoke_uri)
credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
return credentials
@classmethod
def from_json_keyfile_name(cls, filename, scopes='',
token_uri=None, revoke_uri=None):
"""Factory constructor from JSON keyfile by name.
Args:
filename: string, The location of the keyfile.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for OAuth 2.0 provider token endpoint.
If unset and not present in the key file, defaults
to Google's endpoints.
revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
If unset and not present in the key file, defaults
to Google's endpoints.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
KeyError, if one of the expected keys is not present in
the keyfile.
"""
with open(filename, 'r') as file_obj:
client_credentials = json.load(file_obj)
return cls._from_parsed_json_keyfile(client_credentials, scopes,
token_uri=token_uri,
revoke_uri=revoke_uri)
@classmethod
def from_json_keyfile_dict(cls, keyfile_dict, scopes='',
token_uri=None, revoke_uri=None):
"""Factory constructor from parsed JSON keyfile.
Args:
keyfile_dict: dict-like object, The parsed dictionary-like object
containing the contents of the JSON keyfile.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for OAuth 2.0 provider token endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
If unset and not present in keyfile_dict, defaults
to Google's endpoints.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
KeyError, if one of the expected keys is not present in
the keyfile.
"""
return cls._from_parsed_json_keyfile(keyfile_dict, scopes,
token_uri=token_uri,
revoke_uri=revoke_uri)
@classmethod
def _from_p12_keyfile_contents(cls, service_account_email,
private_key_pkcs12,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
private_key_pkcs12: string, The contents of a PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
if private_key_password is None:
private_key_password = _PASSWORD_DEFAULT
if crypt.Signer is not crypt.OpenSSLSigner:
raise NotImplementedError(_PKCS12_ERROR)
signer = crypt.Signer.from_string(private_key_pkcs12,
private_key_password)
credentials = cls(service_account_email, signer, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri)
credentials._private_key_pkcs12 = private_key_pkcs12
credentials._private_key_password = private_key_password
return credentials
@classmethod
def from_p12_keyfile(cls, service_account_email, filename,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
filename: string, The location of the PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
with open(filename, 'rb') as file_obj:
private_key_pkcs12 = file_obj.read()
return cls._from_p12_keyfile_contents(
service_account_email, private_key_pkcs12,
private_key_password=private_key_password, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri)
@classmethod
def from_p12_keyfile_buffer(cls, service_account_email, file_buffer,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
file_buffer: stream, A buffer that implements ``read()``
and contains the PKCS#12 key contents.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
private_key_pkcs12 = file_buffer.read()
return cls._from_p12_keyfile_contents(
service_account_email, private_key_pkcs12,
private_key_password=private_key_password, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri)
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
now = int(time.time())
payload = {
'aud': self.token_uri,
'scope': self._scopes,
'iat': now,
'exp': now + self.MAX_TOKEN_LIFETIME_SECS,
'iss': self._service_account_email,
}
payload.update(self._kwargs)
return crypt.make_signed_jwt(self._signer, payload,
key_id=self._private_key_id)
def sign_blob(self, blob):
"""Cryptographically sign a blob (of bytes).
Implements abstract method
:meth:`oauth2client.client.AssertionCredentials.sign_blob`.
Args:
blob: bytes, Message to be signed.
Returns:
tuple, A pair of the private key ID used to sign the blob and
the signed contents.
"""
return self._private_key_id, self._signer.sign(blob)
@property
def service_account_email(self):
"""Get the email for the current service account.
Returns:
string, The email associated with the service account.
"""
return self._service_account_email
@property
def serialization_data(self):
# NOTE: This is only useful for JSON keyfile.
return {
'type': 'service_account',
'client_email': self._service_account_email,
'private_key_id': self._private_key_id,
'private_key': self._private_key_pkcs8_pem,
'client_id': self.client_id,
}
@classmethod
def from_json(cls, json_data):
"""Deserialize a JSON-serialized instance.
Inverse to :meth:`to_json`.
Args:
json_data: dict or string, Serialized JSON (as a string or an
already parsed dictionary) representing a credential.
Returns:
ServiceAccountCredentials from the serialized data.
"""
if not isinstance(json_data, dict):
json_data = json.loads(_helpers._from_bytes(json_data))
private_key_pkcs8_pem = None
pkcs12_val = json_data.get(_PKCS12_KEY)
password = None
if pkcs12_val is None:
private_key_pkcs8_pem = json_data['_private_key_pkcs8_pem']
signer = crypt.Signer.from_string(private_key_pkcs8_pem)
else:
# NOTE: This assumes that private_key_pkcs8_pem is not also
# in the serialized data. This would be very incorrect
# state.
pkcs12_val = base64.b64decode(pkcs12_val)
password = json_data['_private_key_password']
signer = crypt.Signer.from_string(pkcs12_val, password)
credentials = cls(
json_data['_service_account_email'],
signer,
scopes=json_data['_scopes'],
private_key_id=json_data['_private_key_id'],
client_id=json_data['client_id'],
user_agent=json_data['_user_agent'],
**json_data['_kwargs']
)
if private_key_pkcs8_pem is not None:
credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
if pkcs12_val is not None:
credentials._private_key_pkcs12 = pkcs12_val
if password is not None:
credentials._private_key_password = password
credentials.invalid = json_data['invalid']
credentials.access_token = json_data['access_token']
credentials.token_uri = json_data['token_uri']
credentials.revoke_uri = json_data['revoke_uri']
token_expiry = json_data.get('token_expiry', None)
if token_expiry is not None:
credentials.token_expiry = datetime.datetime.strptime(
token_expiry, client.EXPIRY_FORMAT)
return credentials
def create_scoped_required(self):
return not self._scopes
def create_scoped(self, scopes):
result = self.__class__(self._service_account_email,
self._signer,
scopes=scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
**self._kwargs)
result.token_uri = self.token_uri
result.revoke_uri = self.revoke_uri
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
result._private_key_pkcs12 = self._private_key_pkcs12
result._private_key_password = self._private_key_password
return result
def create_with_claims(self, claims):
"""Create credentials that specify additional claims.
Args:
claims: dict, key-value pairs for claims.
Returns:
ServiceAccountCredentials, a copy of the current service account
credentials with updated claims to use when obtaining access
tokens.
"""
new_kwargs = dict(self._kwargs)
new_kwargs.update(claims)
result = self.__class__(self._service_account_email,
self._signer,
scopes=self._scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
**new_kwargs)
result.token_uri = self.token_uri
result.revoke_uri = self.revoke_uri
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
result._private_key_pkcs12 = self._private_key_pkcs12
result._private_key_password = self._private_key_password
return result
def create_delegated(self, sub):
"""Create credentials that act as domain-wide delegation of authority.
Use the ``sub`` parameter as the subject to delegate on behalf of
that user.
For example::
>>> account_sub = 'foo@email.com'
>>> delegate_creds = creds.create_delegated(account_sub)
Args:
sub: string, An email address that this service account will
act on behalf of (via domain-wide delegation).
Returns:
ServiceAccountCredentials, a copy of the current service account
updated to act on behalf of ``sub``.
"""
return self.create_with_claims({'sub': sub})
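# End-to-end usage sketch (the keyfile path, scope URL, and addresses are
# illustrative, not part of this module): load credentials from a JSON
# keyfile, then delegate to act on behalf of a domain user.
#
#     creds = ServiceAccountCredentials.from_json_keyfile_name(
#         '/path/to/keyfile.json',
#         scopes=['https://www.googleapis.com/auth/devstorage.read_only'])
#     delegated = creds.create_delegated('user@example.com')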
def _datetime_to_secs(utc_time):
# TODO(issue 298): use time_delta.total_seconds()
# time_delta.total_seconds() not supported in Python 2.6
epoch = datetime.datetime(1970, 1, 1)
time_delta = utc_time - epoch
return time_delta.days * 86400 + time_delta.seconds
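# Sketch: one day past the epoch is exactly 86400 seconds.
#
#     _datetime_to_secs(datetime.datetime(1970, 1, 2))  # -> 86400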
class _JWTAccessCredentials(ServiceAccountCredentials):
"""Self signed JWT credentials.
Makes an assertion to server using a self signed JWT from service account
credentials. These credentials do NOT use OAuth 2.0 and instead
authenticate directly.
"""
_MAX_TOKEN_LIFETIME_SECS = 3600
"""Max lifetime of the token (one hour, in seconds)."""
def __init__(self,
service_account_email,
signer,
scopes=None,
private_key_id=None,
client_id=None,
user_agent=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
additional_claims=None):
if additional_claims is None:
additional_claims = {}
super(_JWTAccessCredentials, self).__init__(
service_account_email,
signer,
private_key_id=private_key_id,
client_id=client_id,
user_agent=user_agent,
token_uri=token_uri,
revoke_uri=revoke_uri,
**additional_claims)
def authorize(self, http):
"""Authorize an httplib2.Http instance with a JWT assertion.
Unless specified, the 'aud' of the assertion will be the base
uri of the request.
Args:
http: An instance of ``httplib2.Http`` or something that acts
like it.
Returns:
A modified instance of http that was passed in.
Example::
h = httplib2.Http()
h = credentials.authorize(h)
"""
transport.wrap_http_for_jwt_access(self, http)
return http
def get_access_token(self, http=None, additional_claims=None):
"""Create a signed jwt.
Args:
http: unused
additional_claims: dict, additional claims to add to
the payload of the JWT.
Returns:
An AccessTokenInfo with the signed jwt
"""
if additional_claims is None:
if self.access_token is None or self.access_token_expired:
self.refresh(None)
return client.AccessTokenInfo(
access_token=self.access_token, expires_in=self._expires_in())
else:
# Create a 1 time token
token, unused_expiry = self._create_token(additional_claims)
return client.AccessTokenInfo(
access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)
def revoke(self, http):
"""Cannot revoke JWTAccessCredentials tokens."""
pass
def create_scoped_required(self):
# JWTAccessCredentials are unscoped by definition
return True
def create_scoped(self, scopes, token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        # Returns OAuth2 credentials with the given scopes.
result = ServiceAccountCredentials(self._service_account_email,
self._signer,
scopes=scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
token_uri=token_uri,
revoke_uri=revoke_uri,
**self._kwargs)
if self._private_key_pkcs8_pem is not None:
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
if self._private_key_pkcs12 is not None:
result._private_key_pkcs12 = self._private_key_pkcs12
if self._private_key_password is not None:
result._private_key_password = self._private_key_password
return result
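    # Illustrative note (not in the original source): callers typically
    # downgrade to scoped OAuth2 credentials like so; the scope URL below is
    # a hypothetical example.
    #
    #     scoped = jwt_creds.create_scoped(
    #         ['https://www.googleapis.com/auth/devstorage.read_only'])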
def refresh(self, http):
"""Refreshes the access_token.
        The HTTP object is unused since no request needs to be made to
        get a new token; it can be generated locally.
Args:
http: unused HTTP object
"""
self._refresh(None)
def _refresh(self, http):
"""Refreshes the access_token.
Args:
http: unused HTTP object
"""
self.access_token, self.token_expiry = self._create_token()
def _create_token(self, additional_claims=None):
now = client._UTCNOW()
lifetime = datetime.timedelta(seconds=self._MAX_TOKEN_LIFETIME_SECS)
expiry = now + lifetime
payload = {
'iat': _datetime_to_secs(now),
'exp': _datetime_to_secs(expiry),
'iss': self._service_account_email,
'sub': self._service_account_email
}
payload.update(self._kwargs)
if additional_claims is not None:
payload.update(additional_claims)
jwt = crypt.make_signed_jwt(self._signer, payload,
key_id=self._private_key_id)
return jwt.decode('ascii'), expiry
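# Hedged usage sketch (illustrative only; ``_example_jwt_access_flow`` and its
# argument are hypothetical, not part of the original module). It shows the
# intended flow: load ordinary service-account credentials, re-wrap the same
# signer as self-signed JWT credentials, and mint a token locally.
def _example_jwt_access_flow(json_keyfile_path):
    sa_creds = ServiceAccountCredentials.from_json_keyfile_name(
        json_keyfile_path)
    jwt_creds = _JWTAccessCredentials(
        sa_creds.service_account_email,
        sa_creds._signer,
        private_key_id=sa_creds._private_key_id)
    # get_access_token() signs the JWT locally; no HTTP round trip is made.
    return jwt_creds.get_access_token().access_token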
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/oauth2client/oauth2client/service_account.py
| 0.720958 | 0.157105 |
service_account.py
|
pypi
|
"""OpenSSL Crypto-related routines for oauth2client."""
from OpenSSL import crypto
from oauth2client import _helpers
class OpenSSLVerifier(object):
"""Verifies the signature on a message."""
def __init__(self, pubkey):
"""Constructor.
Args:
pubkey: OpenSSL.crypto.PKey, The public key to verify with.
"""
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If string,
will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
signature = _helpers._to_bytes(signature, encoding='utf-8')
try:
crypto.verify(self._pubkey, signature, message, 'sha256')
return True
except crypto.Error:
return False
@staticmethod
def from_string(key_pem, is_x509_cert):
"""Construct a Verified instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
Verifier instance.
Raises:
OpenSSL.crypto.Error: if the key_pem can't be parsed.
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
else:
pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
return OpenSSLVerifier(pubkey)
class OpenSSLSigner(object):
"""Signs messages with a private key."""
def __init__(self, pkey):
"""Constructor.
Args:
pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
"""
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
            bytes, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return crypto.sign(self._key, message, 'sha256')
@staticmethod
def from_string(key, password=b'notasecret'):
"""Construct a Signer instance from a string.
Args:
key: string, private key in PKCS12 or PEM format.
password: string, password for the private key file.
Returns:
Signer instance.
Raises:
            OpenSSL.crypto.Error: if the key can't be parsed.
"""
key = _helpers._to_bytes(key)
parsed_pem_key = _helpers._parse_pem_key(key)
if parsed_pem_key:
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
else:
password = _helpers._to_bytes(password, encoding='utf-8')
pkey = crypto.load_pkcs12(key, password).get_privatekey()
return OpenSSLSigner(pkey)
def pkcs12_key_as_pem(private_key_bytes, private_key_password):
"""Convert the contents of a PKCS#12 key to PEM using pyOpenSSL.
Args:
private_key_bytes: Bytes. PKCS#12 key in DER format.
private_key_password: String. Password for PKCS#12 key.
Returns:
String. PEM contents of ``private_key_bytes``.
"""
private_key_password = _helpers._to_bytes(private_key_password)
pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password)
return crypto.dump_privatekey(crypto.FILETYPE_PEM,
pkcs12.get_privatekey())
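# Hedged round-trip sketch (illustrative; this helper is not part of the
# original module). ``pem_private_key`` and ``pem_certificate`` stand in for
# caller-supplied PEM strings: the signer signs with the private key and the
# verifier checks the signature against the matching X509 certificate.
def _example_sign_and_verify(pem_private_key, pem_certificate):
    signer = OpenSSLSigner.from_string(pem_private_key)
    signature = signer.sign(b'payload to protect')
    verifier = OpenSSLVerifier.from_string(pem_certificate, is_x509_cert=True)
    return verifier.verify(b'payload to protect', signature)  # True on match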
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/oauth2client/oauth2client/_openssl_crypt.py
| 0.906875 | 0.263718 |
_openssl_crypt.py
|
pypi
|
import threading
import keyring
from oauth2client import client
class Storage(client.Storage):
"""Store and retrieve a single credential to and from the keyring.
To use this module you must have the keyring module installed. See
<http://pypi.python.org/pypi/keyring/>. This is an optional module and is
not installed with oauth2client by default because it does not work on all
the platforms that oauth2client supports, such as Google App Engine.
The keyring module <http://pypi.python.org/pypi/keyring/> is a
    cross-platform library for accessing the keyring capabilities of the local
system. The user will be prompted for their keyring password when this
module is used, and the manner in which the user is prompted will vary per
platform.
Usage::
from oauth2client import keyring_storage
s = keyring_storage.Storage('name_of_application', 'user1')
credentials = s.get()
"""
def __init__(self, service_name, user_name):
"""Constructor.
Args:
service_name: string, The name of the service under which the
credentials are stored.
user_name: string, The name of the user to store credentials for.
"""
super(Storage, self).__init__(lock=threading.Lock())
self._service_name = service_name
self._user_name = user_name
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = client.Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name,
credentials.to_json())
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name, '')
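# Hedged usage sketch (illustrative; not part of the original module). The
# service and user names are hypothetical; ``creds`` is assumed to be an
# existing oauth2client Credentials object obtained elsewhere.
def _example_keyring_roundtrip(creds):
    store = Storage('my_application', 'user1')
    store.put(creds)    # serialize to JSON and write to the OS keyring
    return store.get()  # returns None if missing or unparseable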
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/oauth2client/oauth2client/contrib/keyring_storage.py
| 0.654343 | 0.254961 |
keyring_storage.py
|
pypi
|