Column summary (type and observed length/value range for each field):

| column | dtype | range |
|---|---|---|
| body_hash | string | length 64 |
| body | string | length 23 to 109k |
| docstring | string | length 1 to 57k |
| path | string | length 4 to 198 |
| name | string | length 1 to 115 |
| repository_name | string | length 7 to 111 |
| repository_stars | float64 | 0 to 191k |
| lang | string | 1 distinct value |
| body_without_docstring | string | length 14 to 108k |
| unified | string | length 45 to 133k |

body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified |
---|---|---|---|---|---|---|---|---|---|
ab251e511d598dd68211c975bc2bc57ace6e3b10d3aeb12321b6f6a42e8a1af0
|
def error(name=None, message=''):
'\n If name is None Then return empty dict\n\n Otherwise raise an exception with __name__ from name, message from message\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-wheel error\n salt-wheel error.error name="Exception" message="This is an error."\n '
ret = {}
if (name is not None):
salt.utils.error.raise_error(name=name, message=message)
return ret
|
If name is None Then return empty dict
Otherwise raise an exception with __name__ from name, message from message
CLI Example:
.. code-block:: bash
salt-wheel error
salt-wheel error.error name="Exception" message="This is an error."
|
salt/wheel/error.py
|
error
|
preoctopus/salt
| 3 |
python
|
def error(name=None, message=''):
'\n If name is None Then return empty dict\n\n Otherwise raise an exception with __name__ from name, message from message\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-wheel error\n salt-wheel error.error name="Exception" message="This is an error."\n '
ret = {}
if (name is not None):
salt.utils.error.raise_error(name=name, message=message)
return ret
|
def error(name=None, message=''):
'\n If name is None Then return empty dict\n\n Otherwise raise an exception with __name__ from name, message from message\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-wheel error\n salt-wheel error.error name="Exception" message="This is an error."\n '
ret = {}
if (name is not None):
salt.utils.error.raise_error(name=name, message=message)
return ret<|docstring|>If name is None Then return empty dict
Otherwise raise an exception with __name__ from name, message from message
CLI Example:
.. code-block:: bash
salt-wheel error
salt-wheel error.error name="Exception" message="This is an error."<|endoftext|>
|
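A short, hedged sketch of driving this wheel module from Python instead of the salt-wheel CLI shown in the docstring; it assumes a Salt installation where the module path from the row (salt/wheel/error.py) is importable.

```python
# Hedged sketch (assumes Salt is installed; import path taken from the row above).
from salt.wheel.error import error

assert error() == {}  # name=None: returns an empty dict, raises nothing
try:
    error(name='Exception', message='This is an error.')
except Exception as exc:  # raise_error rebuilds the exception class from `name`
    print(type(exc).__name__, exc)  # Exception This is an error.
```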
17cd771c34bc7122f3ae81fc6a9feb7feb77189b2bfb4dad7abe12a8b5fd289c
|
def get_keras_logreg(input_dim, output_dim=2):
'Create a simple logistic regression model (using keras)\n '
model = tf.keras.Sequential()
if (output_dim == 1):
loss = 'binary_crossentropy'
activation = tf.nn.sigmoid
else:
loss = 'categorical_crossentropy'
activation = tf.nn.softmax
dense = tf.keras.layers.Dense(units=output_dim, input_dim=input_dim, activation=activation, kernel_regularizer=tf.keras.regularizers.l2(0.001))
model.add(dense)
opt = tf.keras.optimizers.Adam(lr=0.01)
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
return model
|
Create a simple logistic regression model (using keras)
|
server/verifier/keraslogreg.py
|
get_keras_logreg
|
AlessandraBotto/ruler
| 20 |
python
|
def get_keras_logreg(input_dim, output_dim=2):
'\n '
model = tf.keras.Sequential()
if (output_dim == 1):
loss = 'binary_crossentropy'
activation = tf.nn.sigmoid
else:
loss = 'categorical_crossentropy'
activation = tf.nn.softmax
dense = tf.keras.layers.Dense(units=output_dim, input_dim=input_dim, activation=activation, kernel_regularizer=tf.keras.regularizers.l2(0.001))
model.add(dense)
opt = tf.keras.optimizers.Adam(lr=0.01)
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
return model
|
def get_keras_logreg(input_dim, output_dim=2):
'\n '
model = tf.keras.Sequential()
if (output_dim == 1):
loss = 'binary_crossentropy'
activation = tf.nn.sigmoid
else:
loss = 'categorical_crossentropy'
activation = tf.nn.softmax
dense = tf.keras.layers.Dense(units=output_dim, input_dim=input_dim, activation=activation, kernel_regularizer=tf.keras.regularizers.l2(0.001))
model.add(dense)
opt = tf.keras.optimizers.Adam(lr=0.01)
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
return model<|docstring|>Create a simple logistic regression model (using keras)<|endoftext|>
|
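A hedged usage sketch for the factory above. Two caveats: `Adam(lr=0.01)` uses the legacy `lr` alias, which recent TensorFlow releases reject in favor of `learning_rate`, and the shapes below are purely illustrative.

```python
# Hedged usage sketch (assumes TensorFlow 2.x and NumPy; get_keras_logreg as above).
import numpy as np
import tensorflow as tf

model = get_keras_logreg(input_dim=50, output_dim=2)
X = np.random.rand(16, 50).astype('float32')
Y = tf.keras.utils.to_categorical(np.random.randint(0, 2, size=16), num_classes=2)
model.fit(X, Y, epochs=1, verbose=0)
print(model.predict(X).shape)  # (16, 2): softmax probabilities per class
```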
48879ec31b3ce2cfe46a61806319dac15fdb9eb484d003fa5cb1aa3d359c72a4
|
def get_keras_early_stopping(patience=10):
'Create early stopping condition\n '
return tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10, verbose=1, restore_best_weights=True)
|
Create early stopping condition
|
server/verifier/keraslogreg.py
|
get_keras_early_stopping
|
AlessandraBotto/ruler
| 20 |
python
|
def get_keras_early_stopping(patience=10):
'\n '
return tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10, verbose=1, restore_best_weights=True)
|
def get_keras_early_stopping(patience=10):
'\n '
return tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10, verbose=1, restore_best_weights=True)<|docstring|>Create early stopping condition<|endoftext|>
|
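Note that the function above accepts a `patience` argument but hardcodes 10 in the callback, so the parameter is silently ignored. A corrected sketch that wires it through:

```python
# Hedged corrected sketch (assumes TensorFlow 2.x); only patience=... changes.
import tensorflow as tf

def get_keras_early_stopping(patience=10):
    """Create early stopping condition, honoring the `patience` argument."""
    return tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                            patience=patience, verbose=1,
                                            restore_best_weights=True)
```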
eab5e4a8ac530ab1b39008ed18282db3aa8095c11d811bc892a87b50944f2615
|
def __init__(self, cardinality=2):
'Summary\n \n Args:\n cardinality (int, optional): Number of output classes\n '
snork_seed(123)
tf.random.set_seed(123)
np_seed(123)
py_seed(123)
self.cardinality = cardinality
self.keras_model = None
|
Summary
Args:
cardinality (int, optional): Number of output classes
|
server/verifier/keraslogreg.py
|
__init__
|
AlessandraBotto/ruler
| 20 |
python
|
def __init__(self, cardinality=2):
'Summary\n \n Args:\n cardinality (int, optional): Number of output classes\n '
snork_seed(123)
tf.random.set_seed(123)
np_seed(123)
py_seed(123)
self.cardinality = cardinality
self.keras_model = None
|
def __init__(self, cardinality=2):
'Summary\n \n Args:\n cardinality (int, optional): Number of output classes\n '
snork_seed(123)
tf.random.set_seed(123)
np_seed(123)
py_seed(123)
self.cardinality = cardinality
self.keras_model = None<|docstring|>Summary
Args:
cardinality (int, optional): Number of output classes<|endoftext|>
|
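The constructor pins four random number generators so repeated runs are reproducible. The row does not show its imports, so `snork_seed`, `np_seed`, and `py_seed` are presumably import aliases; a hedged sketch of the same pattern with standard names:

```python
# Hedged sketch of the seeding pattern (alias resolution is an assumption).
import random
import numpy as np
import tensorflow as tf

SEED = 123
tf.random.set_seed(SEED)  # TensorFlow ops
np.random.seed(SEED)      # NumPy, i.e. np_seed in the row
random.seed(SEED)         # Python stdlib, i.e. py_seed in the row
# snork_seed(SEED) in the row additionally seeds Snorkel's internals.
```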
5a75baebfe20fe82c87f3f730476d3b6f6272009337b01bed03b702df1e9ad42
|
def fit(self, X_train, Y_train, X_valid, Y_valid):
'Train the model using the given training and validation data.\n \n Args:\n X_train (list(str)): Training text examples, length n\n Y_train (matrix): Training labels, size n*m, where m is the cardinality\n X_valid (list(str)): Validation test examples, length p\n Y_valid (matrix): Validation labels, size p*m\n '
if (self.keras_model is None):
self.vectorizer = CountVectorizer(ngram_range=(1, 2))
self.vectorizer.fit(X_train)
X_train = self.vectorizer.transform(X_train)
X_valid = self.vectorizer.transform(X_valid)
if (self.keras_model is None):
self.keras_model = get_keras_logreg(input_dim=X_train.shape[1], output_dim=self.cardinality)
self.keras_model.fit(x=X_train, y=Y_train, validation_data=(X_valid, Y_valid), callbacks=[get_keras_early_stopping()], epochs=20, verbose=0)
|
Train the model using the given training and validation data.
Args:
X_train (list(str)): Training text examples, length n
Y_train (matrix): Training labels, size n*m, where m is the cardinality
X_valid (list(str)): Validation test examples, length p
Y_valid (matrix): Validation labels, size p*m
|
server/verifier/keraslogreg.py
|
fit
|
AlessandraBotto/ruler
| 20 |
python
|
def fit(self, X_train, Y_train, X_valid, Y_valid):
'Train the model using the given training and validation data.\n \n Args:\n X_train (list(str)): Training text examples, length n\n Y_train (matrix): Training labels, size n*m, where m is the cardinality\n X_valid (list(str)): Validation test examples, length p\n Y_valid (matrix): Validation labels, size p*m\n '
if (self.keras_model is None):
self.vectorizer = CountVectorizer(ngram_range=(1, 2))
self.vectorizer.fit(X_train)
X_train = self.vectorizer.transform(X_train)
X_valid = self.vectorizer.transform(X_valid)
if (self.keras_model is None):
self.keras_model = get_keras_logreg(input_dim=X_train.shape[1], output_dim=self.cardinality)
self.keras_model.fit(x=X_train, y=Y_train, validation_data=(X_valid, Y_valid), callbacks=[get_keras_early_stopping()], epochs=20, verbose=0)
|
def fit(self, X_train, Y_train, X_valid, Y_valid):
'Train the model using the given training and validation data.\n \n Args:\n X_train (list(str)): Training text examples, length n\n Y_train (matrix): Training labels, size n*m, where m is the cardinality\n X_valid (list(str)): Validation test examples, length p\n Y_valid (matrix): Validation labels, size p*m\n '
if (self.keras_model is None):
self.vectorizer = CountVectorizer(ngram_range=(1, 2))
self.vectorizer.fit(X_train)
X_train = self.vectorizer.transform(X_train)
X_valid = self.vectorizer.transform(X_valid)
if (self.keras_model is None):
self.keras_model = get_keras_logreg(input_dim=X_train.shape[1], output_dim=self.cardinality)
self.keras_model.fit(x=X_train, y=Y_train, validation_data=(X_valid, Y_valid), callbacks=[get_keras_early_stopping()], epochs=20, verbose=0)<|docstring|>Train the model using the given training and validation data.
Args:
X_train (list(str)): Training text examples, length n
Y_train (matrix): Training labels, size n*m, where m is the cardinality
X_valid (list(str)): Validation test examples, length p
Y_valid (matrix): Validation labels, size p*m<|endoftext|>
|
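A hedged end-to-end sketch of `fit`: raw strings go in, a bigram `CountVectorizer` is fitted on the training split the first time through, and the logistic regression is then trained with early stopping. The wrapper class name never appears in these rows, so `KerasLogReg` below is an assumption.

```python
# Hedged sketch (KerasLogReg is an assumed name for the class in keraslogreg.py).
import tensorflow as tf

X_train = ['good movie', 'bad film', 'great plot', 'terrible acting']
Y_train = tf.keras.utils.to_categorical([1, 0, 1, 0], num_classes=2)
X_valid = ['nice story', 'awful pacing']
Y_valid = tf.keras.utils.to_categorical([1, 0], num_classes=2)

clf = KerasLogReg(cardinality=2)
clf.fit(X_train, Y_train, X_valid, Y_valid)  # vectorizes, then trains up to 20 epochs
```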
62a95ce2ccfc18107b93b2a45cc811312266e3d0ec5efc711571a71596087bcc
|
def predict(self, X):
'Predict probabilities that each sample in X belongs to each class.\n \n Args:\n X (list(str)): Texts to predict class, length n\n \n Returns:\n matrix: size n*m, where m is the cardinality of the model\n '
X_v = self.vectorizer.transform(X)
return self.keras_model.predict(x=X_v)
|
Predict probabilities that each sample in X belongs to each class.
Args:
X (list(str)): Texts to predict class, length n
Returns:
matrix: size n*m, where m is the cardinality of the model
|
server/verifier/keraslogreg.py
|
predict
|
AlessandraBotto/ruler
| 20 |
python
|
def predict(self, X):
'Predict probabilities that each sample in X belongs to each class.\n \n Args:\n X (list(str)): Texts to predict class, length n\n \n Returns:\n matrix: size n*m, where m is the cardinality of the model\n '
X_v = self.vectorizer.transform(X)
return self.keras_model.predict(x=X_v)
|
def predict(self, X):
'Predict probabilities that each sample in X belongs to each class.\n \n Args:\n X (list(str)): Texts to predict class, length n\n \n Returns:\n matrix: size n*m, where m is the cardinality of the model\n '
X_v = self.vectorizer.transform(X)
return self.keras_model.predict(x=X_v)<|docstring|>Predict probabilities that each sample in X belongs to each class.
Args:
X (list(str)): Texts to predict class, length n
Returns:
matrix: size n*m, where m is the cardinality of the model<|endoftext|>
|
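Continuing the sketch above: `predict` reuses the vectorizer fitted during `fit`, so it only works after training; called first, it would fail because `self.vectorizer` does not exist yet.

```python
probs = clf.predict(['fantastic ending', 'dreadful script'])
print(probs.shape)        # (2, 2): one row per text, one column per class
print(probs.sum(axis=1))  # each row sums to ~1.0 under the softmax head
```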
9f244454b590df5740a096f461692081134551308216e0fd25ff88a60fec78fc
|
@classmethod
def handle(cls, value, context, **kwargs):
'Retrieve a variable from the variable definition.\n\n The value is retrieved from the variables passed to Runway using\n either a variables file or the ``variables`` directive of the\n config file.\n\n Args:\n value: The value passed to the Lookup.\n variables: The resolved variables pass to Runway.\n\n Raises:\n ValueError: Unable to find a value for the provided query and\n a default value was not provided.\n\n '
(query, args) = cls.parse(value)
variables = kwargs['variables']
result = variables.find(query, default=args.pop('default', ''))
if (result != ''):
return cls.format_results(result, **args)
raise ValueError('"{}" does not exist in the variable definition'.format(query))
|
Retrieve a variable from the variable definition.
The value is retrieved from the variables passed to Runway using
either a variables file or the ``variables`` directive of the
config file.
Args:
value: The value passed to the Lookup.
variables: The resolved variables pass to Runway.
Raises:
ValueError: Unable to find a value for the provided query and
a default value was not provided.
|
runway/lookups/handlers/var.py
|
handle
|
pataraco/runway
| 1 |
python
|
@classmethod
def handle(cls, value, context, **kwargs):
'Retrieve a variable from the variable definition.\n\n The value is retrieved from the variables passed to Runway using\n either a variables file or the ``variables`` directive of the\n config file.\n\n Args:\n value: The value passed to the Lookup.\n variables: The resolved variables pass to Runway.\n\n Raises:\n ValueError: Unable to find a value for the provided query and\n a default value was not provided.\n\n '
(query, args) = cls.parse(value)
variables = kwargs['variables']
result = variables.find(query, default=args.pop('default', ''))
if (result != ''):
return cls.format_results(result, **args)
raise ValueError('"{}" does not exist in the variable definition'.format(query))
|
@classmethod
def handle(cls, value, context, **kwargs):
'Retrieve a variable from the variable definition.\n\n The value is retrieved from the variables passed to Runway using\n either a variables file or the ``variables`` directive of the\n config file.\n\n Args:\n value: The value passed to the Lookup.\n variables: The resolved variables pass to Runway.\n\n Raises:\n ValueError: Unable to find a value for the provided query and\n a default value was not provided.\n\n '
(query, args) = cls.parse(value)
variables = kwargs['variables']
result = variables.find(query, default=args.pop('default', ''))
if (result != ''):
return cls.format_results(result, **args)
raise ValueError('"{}" does not exist in the variable definition'.format(query))<|docstring|>Retrieve a variable from the variable definition.
The value is retrieved from the variables passed to Runway using
either a variables file or the ``variables`` directive of the
config file.
Args:
value: The value passed to the Lookup.
variables: The resolved variables pass to Runway.
Raises:
ValueError: Unable to find a value for the provided query and
a default value was not provided.<|endoftext|>
|
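A hedged sketch of exercising the lookup handler directly. The enclosing class name is not shown in the row, so `VarLookup` is an assumption inferred from the file path, and the stand-in variables object implements only the `find(query, default=...)` method the handler actually calls.

```python
# Hedged sketch (class name and import path are assumptions from the row).
from runway.lookups.handlers.var import VarLookup

class FakeVariables:
    """Stand-in for Runway's resolved variables object."""
    def __init__(self, data):
        self.data = data

    def find(self, query, default=''):
        return self.data.get(query, default)

value = VarLookup.handle('environment', context=None,
                         variables=FakeVariables({'environment': 'prod'}))
print(value)  # -> 'prod'
# A missing key with no default raises:
# VarLookup.handle('missing', None, variables=FakeVariables({}))  # ValueError
```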
1c08afeedad14809cdb9381ef64001fd52092c06c33b21894a2e173accd39cbd
|
def slicer(data, affine=None, value_range=None, opacity=1.0, lookup_colormap=None):
' Cuts 3D scalar or rgb volumes into 2D images\n\n Parameters\n ----------\n data : array, shape (X, Y, Z) or (X, Y, Z, 3)\n A grayscale or rgb 4D volume as a numpy array.\n affine : array, shape (4, 4)\n Grid to space (usually RAS 1mm) transformation matrix. Default is None.\n If None then the identity matrix is used.\n value_range : None or tuple (2,)\n If None then the values will be interpolated from (data.min(),\n data.max()) to (0, 255). Otherwise from (value_range[0],\n value_range[1]) to (0, 255).\n opacity : float\n Opacity of 0 means completely transparent and 1 completely visible.\n lookup_colormap : vtkLookupTable\n If None (default) then a grayscale map is created.\n\n Returns\n -------\n image_actor : ImageActor\n An object that is capable of displaying different parts of the volume\n as slices. The key method of this object is ``display_extent`` where\n one can input grid coordinates and display the slice in space (or grid)\n coordinates as calculated by the affine parameter.\n\n '
if (data.ndim != 3):
if (data.ndim == 4):
if (data.shape[3] != 3):
raise ValueError('Only RGB 3D arrays are currently supported.')
else:
nb_components = 3
else:
raise ValueError('Only 3D arrays are currently supported.')
else:
nb_components = 1
if (value_range is None):
vol = np.interp(data, xp=[data.min(), data.max()], fp=[0, 255])
else:
vol = np.interp(data, xp=[value_range[0], value_range[1]], fp=[0, 255])
vol = vol.astype('uint8')
im = vtk.vtkImageData()
if (major_version <= 5):
im.SetScalarTypeToUnsignedChar()
(I, J, K) = vol.shape[:3]
im.SetDimensions(I, J, K)
voxsz = (1.0, 1.0, 1.0)
im.SetSpacing(voxsz[2], voxsz[0], voxsz[1])
if (major_version <= 5):
im.AllocateScalars()
im.SetNumberOfScalarComponents(nb_components)
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, nb_components)
vol = np.swapaxes(vol, 0, 2)
vol = np.ascontiguousarray(vol)
if (nb_components == 1):
vol = vol.ravel()
else:
vol = np.reshape(vol, [np.prod(vol.shape[:3]), vol.shape[3]])
uchar_array = numpy_support.numpy_to_vtk(vol, deep=0)
im.GetPointData().SetScalars(uchar_array)
if (affine is None):
affine = np.eye(4)
transform = vtk.vtkTransform()
transform_matrix = vtk.vtkMatrix4x4()
transform_matrix.DeepCopy((affine[0][0], affine[0][1], affine[0][2], affine[0][3], affine[1][0], affine[1][1], affine[1][2], affine[1][3], affine[2][0], affine[2][1], affine[2][2], affine[2][3], affine[3][0], affine[3][1], affine[3][2], affine[3][3]))
transform.SetMatrix(transform_matrix)
transform.Inverse()
image_resliced = vtk.vtkImageReslice()
set_input(image_resliced, im)
image_resliced.SetResliceTransform(transform)
image_resliced.AutoCropOutputOn()
image_resliced.SetInterpolationModeToLinear()
image_resliced.Update()
if (nb_components == 1):
if (lookup_colormap is None):
lut = colormap_lookup_table((0, 255), (0, 0), (0, 0), (0, 1))
else:
lut = lookup_colormap
(x1, x2, y1, y2, z1, z2) = im.GetExtent()
(ex1, ex2, ey1, ey2, ez1, ez2) = image_resliced.GetOutput().GetExtent()
class ImageActor(vtk.vtkImageActor):
def input_connection(self, output):
if (vtk.VTK_MAJOR_VERSION <= 5):
self.SetInput(output.GetOutput())
else:
self.GetMapper().SetInputConnection(output.GetOutputPort())
self.output = output
self.shape = ((ex2 + 1), (ey2 + 1), (ez2 + 1))
def display_extent(self, x1, x2, y1, y2, z1, z2):
self.SetDisplayExtent(x1, x2, y1, y2, z1, z2)
if (vtk.VTK_MAJOR_VERSION > 5):
self.Update()
def display(self, x=None, y=None, z=None):
if ((x is None) and (y is None) and (z is None)):
self.display_extent(ex1, ex2, ey1, ey2, (ez2 / 2), (ez2 / 2))
if (x is not None):
self.display_extent(x, x, ey1, ey2, ez1, ez2)
if (y is not None):
self.display_extent(ex1, ex2, y, y, ez1, ez2)
if (z is not None):
self.display_extent(ex1, ex2, ey1, ey2, z, z)
def opacity(self, value):
if (vtk.VTK_MAJOR_VERSION <= 5):
self.SetOpacity(value)
else:
self.GetProperty().SetOpacity(value)
def copy(self):
im_actor = ImageActor()
im_actor.input_connection(self.output)
im_actor.SetDisplayExtent(*self.GetDisplayExtent())
im_actor.opacity(opacity)
return im_actor
image_actor = ImageActor()
if (nb_components == 1):
plane_colors = vtk.vtkImageMapToColors()
plane_colors.SetLookupTable(lut)
plane_colors.SetInputConnection(image_resliced.GetOutputPort())
plane_colors.Update()
image_actor.input_connection(plane_colors)
else:
image_actor.input_connection(image_resliced)
image_actor.display()
image_actor.opacity(opacity)
return image_actor
|
Cuts 3D scalar or rgb volumes into 2D images
Parameters
----------
data : array, shape (X, Y, Z) or (X, Y, Z, 3)
A grayscale or rgb 4D volume as a numpy array.
affine : array, shape (4, 4)
Grid to space (usually RAS 1mm) transformation matrix. Default is None.
If None then the identity matrix is used.
value_range : None or tuple (2,)
If None then the values will be interpolated from (data.min(),
data.max()) to (0, 255). Otherwise from (value_range[0],
value_range[1]) to (0, 255).
opacity : float
Opacity of 0 means completely transparent and 1 completely visible.
lookup_colormap : vtkLookupTable
If None (default) then a grayscale map is created.
Returns
-------
image_actor : ImageActor
An object that is capable of displaying different parts of the volume
as slices. The key method of this object is ``display_extent`` where
one can input grid coordinates and display the slice in space (or grid)
coordinates as calculated by the affine parameter.
|
dipy/viz/actor.py
|
slicer
|
JohnGriffiths/dipy
| 0 |
python
|
def slicer(data, affine=None, value_range=None, opacity=1.0, lookup_colormap=None):
' Cuts 3D scalar or rgb volumes into 2D images\n\n Parameters\n ----------\n data : array, shape (X, Y, Z) or (X, Y, Z, 3)\n A grayscale or rgb 4D volume as a numpy array.\n affine : array, shape (4, 4)\n Grid to space (usually RAS 1mm) transformation matrix. Default is None.\n If None then the identity matrix is used.\n value_range : None or tuple (2,)\n If None then the values will be interpolated from (data.min(),\n data.max()) to (0, 255). Otherwise from (value_range[0],\n value_range[1]) to (0, 255).\n opacity : float\n Opacity of 0 means completely transparent and 1 completely visible.\n lookup_colormap : vtkLookupTable\n If None (default) then a grayscale map is created.\n\n Returns\n -------\n image_actor : ImageActor\n An object that is capable of displaying different parts of the volume\n as slices. The key method of this object is ``display_extent`` where\n one can input grid coordinates and display the slice in space (or grid)\n coordinates as calculated by the affine parameter.\n\n '
if (data.ndim != 3):
if (data.ndim == 4):
if (data.shape[3] != 3):
raise ValueError('Only RGB 3D arrays are currently supported.')
else:
nb_components = 3
else:
raise ValueError('Only 3D arrays are currently supported.')
else:
nb_components = 1
if (value_range is None):
vol = np.interp(data, xp=[data.min(), data.max()], fp=[0, 255])
else:
vol = np.interp(data, xp=[value_range[0], value_range[1]], fp=[0, 255])
vol = vol.astype('uint8')
im = vtk.vtkImageData()
if (major_version <= 5):
im.SetScalarTypeToUnsignedChar()
(I, J, K) = vol.shape[:3]
im.SetDimensions(I, J, K)
voxsz = (1.0, 1.0, 1.0)
im.SetSpacing(voxsz[2], voxsz[0], voxsz[1])
if (major_version <= 5):
im.AllocateScalars()
im.SetNumberOfScalarComponents(nb_components)
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, nb_components)
vol = np.swapaxes(vol, 0, 2)
vol = np.ascontiguousarray(vol)
if (nb_components == 1):
vol = vol.ravel()
else:
vol = np.reshape(vol, [np.prod(vol.shape[:3]), vol.shape[3]])
uchar_array = numpy_support.numpy_to_vtk(vol, deep=0)
im.GetPointData().SetScalars(uchar_array)
if (affine is None):
affine = np.eye(4)
transform = vtk.vtkTransform()
transform_matrix = vtk.vtkMatrix4x4()
transform_matrix.DeepCopy((affine[0][0], affine[0][1], affine[0][2], affine[0][3], affine[1][0], affine[1][1], affine[1][2], affine[1][3], affine[2][0], affine[2][1], affine[2][2], affine[2][3], affine[3][0], affine[3][1], affine[3][2], affine[3][3]))
transform.SetMatrix(transform_matrix)
transform.Inverse()
image_resliced = vtk.vtkImageReslice()
set_input(image_resliced, im)
image_resliced.SetResliceTransform(transform)
image_resliced.AutoCropOutputOn()
image_resliced.SetInterpolationModeToLinear()
image_resliced.Update()
if (nb_components == 1):
if (lookup_colormap is None):
lut = colormap_lookup_table((0, 255), (0, 0), (0, 0), (0, 1))
else:
lut = lookup_colormap
(x1, x2, y1, y2, z1, z2) = im.GetExtent()
(ex1, ex2, ey1, ey2, ez1, ez2) = image_resliced.GetOutput().GetExtent()
class ImageActor(vtk.vtkImageActor):
def input_connection(self, output):
if (vtk.VTK_MAJOR_VERSION <= 5):
self.SetInput(output.GetOutput())
else:
self.GetMapper().SetInputConnection(output.GetOutputPort())
self.output = output
self.shape = ((ex2 + 1), (ey2 + 1), (ez2 + 1))
def display_extent(self, x1, x2, y1, y2, z1, z2):
self.SetDisplayExtent(x1, x2, y1, y2, z1, z2)
if (vtk.VTK_MAJOR_VERSION > 5):
self.Update()
def display(self, x=None, y=None, z=None):
if ((x is None) and (y is None) and (z is None)):
self.display_extent(ex1, ex2, ey1, ey2, (ez2 / 2), (ez2 / 2))
if (x is not None):
self.display_extent(x, x, ey1, ey2, ez1, ez2)
if (y is not None):
self.display_extent(ex1, ex2, y, y, ez1, ez2)
if (z is not None):
self.display_extent(ex1, ex2, ey1, ey2, z, z)
def opacity(self, value):
if (vtk.VTK_MAJOR_VERSION <= 5):
self.SetOpacity(value)
else:
self.GetProperty().SetOpacity(value)
def copy(self):
im_actor = ImageActor()
im_actor.input_connection(self.output)
im_actor.SetDisplayExtent(*self.GetDisplayExtent())
im_actor.opacity(opacity)
return im_actor
image_actor = ImageActor()
if (nb_components == 1):
plane_colors = vtk.vtkImageMapToColors()
plane_colors.SetLookupTable(lut)
plane_colors.SetInputConnection(image_resliced.GetOutputPort())
plane_colors.Update()
image_actor.input_connection(plane_colors)
else:
image_actor.input_connection(image_resliced)
image_actor.display()
image_actor.opacity(opacity)
return image_actor
|
def slicer(data, affine=None, value_range=None, opacity=1.0, lookup_colormap=None):
' Cuts 3D scalar or rgb volumes into 2D images\n\n Parameters\n ----------\n data : array, shape (X, Y, Z) or (X, Y, Z, 3)\n A grayscale or rgb 4D volume as a numpy array.\n affine : array, shape (4, 4)\n Grid to space (usually RAS 1mm) transformation matrix. Default is None.\n If None then the identity matrix is used.\n value_range : None or tuple (2,)\n If None then the values will be interpolated from (data.min(),\n data.max()) to (0, 255). Otherwise from (value_range[0],\n value_range[1]) to (0, 255).\n opacity : float\n Opacity of 0 means completely transparent and 1 completely visible.\n lookup_colormap : vtkLookupTable\n If None (default) then a grayscale map is created.\n\n Returns\n -------\n image_actor : ImageActor\n An object that is capable of displaying different parts of the volume\n as slices. The key method of this object is ``display_extent`` where\n one can input grid coordinates and display the slice in space (or grid)\n coordinates as calculated by the affine parameter.\n\n '
if (data.ndim != 3):
if (data.ndim == 4):
if (data.shape[3] != 3):
raise ValueError('Only RGB 3D arrays are currently supported.')
else:
nb_components = 3
else:
raise ValueError('Only 3D arrays are currently supported.')
else:
nb_components = 1
if (value_range is None):
vol = np.interp(data, xp=[data.min(), data.max()], fp=[0, 255])
else:
vol = np.interp(data, xp=[value_range[0], value_range[1]], fp=[0, 255])
vol = vol.astype('uint8')
im = vtk.vtkImageData()
if (major_version <= 5):
im.SetScalarTypeToUnsignedChar()
(I, J, K) = vol.shape[:3]
im.SetDimensions(I, J, K)
voxsz = (1.0, 1.0, 1.0)
im.SetSpacing(voxsz[2], voxsz[0], voxsz[1])
if (major_version <= 5):
im.AllocateScalars()
im.SetNumberOfScalarComponents(nb_components)
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, nb_components)
vol = np.swapaxes(vol, 0, 2)
vol = np.ascontiguousarray(vol)
if (nb_components == 1):
vol = vol.ravel()
else:
vol = np.reshape(vol, [np.prod(vol.shape[:3]), vol.shape[3]])
uchar_array = numpy_support.numpy_to_vtk(vol, deep=0)
im.GetPointData().SetScalars(uchar_array)
if (affine is None):
affine = np.eye(4)
transform = vtk.vtkTransform()
transform_matrix = vtk.vtkMatrix4x4()
transform_matrix.DeepCopy((affine[0][0], affine[0][1], affine[0][2], affine[0][3], affine[1][0], affine[1][1], affine[1][2], affine[1][3], affine[2][0], affine[2][1], affine[2][2], affine[2][3], affine[3][0], affine[3][1], affine[3][2], affine[3][3]))
transform.SetMatrix(transform_matrix)
transform.Inverse()
image_resliced = vtk.vtkImageReslice()
set_input(image_resliced, im)
image_resliced.SetResliceTransform(transform)
image_resliced.AutoCropOutputOn()
image_resliced.SetInterpolationModeToLinear()
image_resliced.Update()
if (nb_components == 1):
if (lookup_colormap is None):
lut = colormap_lookup_table((0, 255), (0, 0), (0, 0), (0, 1))
else:
lut = lookup_colormap
(x1, x2, y1, y2, z1, z2) = im.GetExtent()
(ex1, ex2, ey1, ey2, ez1, ez2) = image_resliced.GetOutput().GetExtent()
class ImageActor(vtk.vtkImageActor):
def input_connection(self, output):
if (vtk.VTK_MAJOR_VERSION <= 5):
self.SetInput(output.GetOutput())
else:
self.GetMapper().SetInputConnection(output.GetOutputPort())
self.output = output
self.shape = ((ex2 + 1), (ey2 + 1), (ez2 + 1))
def display_extent(self, x1, x2, y1, y2, z1, z2):
self.SetDisplayExtent(x1, x2, y1, y2, z1, z2)
if (vtk.VTK_MAJOR_VERSION > 5):
self.Update()
def display(self, x=None, y=None, z=None):
if ((x is None) and (y is None) and (z is None)):
self.display_extent(ex1, ex2, ey1, ey2, (ez2 / 2), (ez2 / 2))
if (x is not None):
self.display_extent(x, x, ey1, ey2, ez1, ez2)
if (y is not None):
self.display_extent(ex1, ex2, y, y, ez1, ez2)
if (z is not None):
self.display_extent(ex1, ex2, ey1, ey2, z, z)
def opacity(self, value):
if (vtk.VTK_MAJOR_VERSION <= 5):
self.SetOpacity(value)
else:
self.GetProperty().SetOpacity(value)
def copy(self):
im_actor = ImageActor()
im_actor.input_connection(self.output)
im_actor.SetDisplayExtent(*self.GetDisplayExtent())
im_actor.opacity(opacity)
return im_actor
image_actor = ImageActor()
if (nb_components == 1):
plane_colors = vtk.vtkImageMapToColors()
plane_colors.SetLookupTable(lut)
plane_colors.SetInputConnection(image_resliced.GetOutputPort())
plane_colors.Update()
image_actor.input_connection(plane_colors)
else:
image_actor.input_connection(image_resliced)
image_actor.display()
image_actor.opacity(opacity)
return image_actor<|docstring|>Cuts 3D scalar or rgb volumes into 2D images
Parameters
----------
data : array, shape (X, Y, Z) or (X, Y, Z, 3)
A grayscale or rgb 4D volume as a numpy array.
affine : array, shape (4, 4)
Grid to space (usually RAS 1mm) transformation matrix. Default is None.
If None then the identity matrix is used.
value_range : None or tuple (2,)
If None then the values will be interpolated from (data.min(),
data.max()) to (0, 255). Otherwise from (value_range[0],
value_range[1]) to (0, 255).
opacity : float
Opacity of 0 means completely transparent and 1 completely visible.
lookup_colormap : vtkLookupTable
If None (default) then a grayscale map is created.
Returns
-------
image_actor : ImageActor
An object that is capable of displaying different parts of the volume
as slices. The key method of this object is ``display_extent`` where
one can input grid coordinates and display the slice in space (or grid)
coordinates as calculated by the affine parameter.<|endoftext|>
|
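Unlike its siblings below, the slicer row carries no Examples section; here is a hedged sketch in the same style as the streamtube and line docstrings, using the same `dipy.viz` import surface (these actors later moved to FURY).

```python
# Hedged usage sketch (assumes NumPy plus a VTK-enabled dipy.viz).
import numpy as np
from dipy.viz import actor, window

data = (255 * np.random.rand(64, 64, 64)).astype(np.uint8)
slice_actor = actor.slicer(data)  # identity affine, grayscale lookup table
slice_actor.display(z=32)         # one axial slice, via display_extent
ren = window.Renderer()
ren.add(slice_actor)
# window.show(ren)
```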
5150e67d5985d52d19c7dae043b4eb4ca87731cd2598b0674ff988908c9f1106
|
def streamtube(lines, colors=None, opacity=1, linewidth=0.01, tube_sides=9, lod=True, lod_points=(10 ** 4), lod_points_size=3, spline_subdiv=None, lookup_colormap=None):
' Uses streamtubes to visualize polylines\n\n Parameters\n ----------\n lines : list\n list of N curves represented as 2D ndarrays\n\n colors : array (N, 3), list of arrays, tuple (3,), array (K,), None\n If None then a standard orientation colormap is used for every line.\n If one tuple of color is used. Then all streamlines will have the same\n colour.\n If an array (N, 3) is given, where N is equal to the number of lines.\n Then every line is coloured with a different RGB color.\n If a list of RGB arrays is given then every point of every line takes\n a different color.\n If an array (K, ) is given, where K is the number of points of all\n lines then these are considered as the values to be used by the\n colormap.\n If an array (L, ) is given, where L is the number of streamlines then\n these are considered as the values to be used by the colormap per\n streamline.\n If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the\n colormap are interpolated automatically using trilinear interpolation.\n\n opacity : float\n Default is 1.\n linewidth : float\n Default is 0.01.\n tube_sides : int\n Default is 9.\n lod : bool\n Use vtkLODActor(level of detail) rather than vtkActor. Default is True.\n Level of detail actors do not render the full geometry when the\n frame rate is low.\n lod_points : int\n Number of points to be used when LOD is in effect. Default is 10000.\n lod_points_size : int\n Size of points when lod is in effect. Default is 3.\n spline_subdiv : int\n Number of splines subdivision to smooth streamtubes. Default is None.\n lookup_colormap : vtkLookupTable\n Add a default lookup table to the colormap. Default is None which calls\n :func:`dipy.viz.actor.colormap_lookup_table`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from dipy.viz import actor, window\n >>> ren = window.Renderer()\n >>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]\n >>> colors = np.random.rand(2, 3)\n >>> c = actor.streamtube(lines, colors)\n >>> ren.add(c)\n >>> #window.show(ren)\n\n Notes\n -----\n Streamtubes can be heavy on GPU when loading many streamlines and\n therefore, you may experience slow rendering time depending on system GPU.\n A solution to this problem is to reduce the number of points in each\n streamline. In Dipy we provide an algorithm that will reduce the number of\n points on the straighter parts of the streamline but keep more points on\n the curvier parts. This can be used in the following way::\n\n from dipy.tracking.distances import approx_polygon_track\n lines = [approx_polygon_track(line, 0.2) for line in lines]\n\n Alternatively we suggest using the ``line`` actor which is much more\n efficient.\n\n See Also\n --------\n :func:``dipy.viz.actor.line``\n '
(poly_data, is_colormap) = lines_to_vtk_polydata(lines, colors)
next_input = poly_data
poly_normals = set_input(vtk.vtkPolyDataNormals(), next_input)
poly_normals.ComputeCellNormalsOn()
poly_normals.ComputePointNormalsOn()
poly_normals.ConsistencyOn()
poly_normals.AutoOrientNormalsOn()
poly_normals.Update()
next_input = poly_normals.GetOutputPort()
if ((spline_subdiv is not None) and (spline_subdiv > 0)):
spline_filter = set_input(vtk.vtkSplineFilter(), next_input)
spline_filter.SetSubdivideToSpecified()
spline_filter.SetNumberOfSubdivisions(spline_subdiv)
spline_filter.Update()
next_input = spline_filter.GetOutputPort()
tube_filter = set_input(vtk.vtkTubeFilter(), next_input)
tube_filter.SetNumberOfSides(tube_sides)
tube_filter.SetRadius(linewidth)
tube_filter.CappingOn()
tube_filter.Update()
next_input = tube_filter.GetOutputPort()
poly_mapper = set_input(vtk.vtkPolyDataMapper(), next_input)
poly_mapper.ScalarVisibilityOn()
poly_mapper.SetScalarModeToUsePointFieldData()
poly_mapper.SelectColorArray('Colors')
poly_mapper.GlobalImmediateModeRenderingOn()
poly_mapper.Update()
if is_colormap:
if (lookup_colormap is None):
lookup_colormap = colormap_lookup_table()
poly_mapper.SetLookupTable(lookup_colormap)
poly_mapper.UseLookupTableScalarRangeOn()
poly_mapper.Update()
if lod:
actor = vtk.vtkLODActor()
actor.SetNumberOfCloudPoints(lod_points)
actor.GetProperty().SetPointSize(lod_points_size)
else:
actor = vtk.vtkActor()
actor.SetMapper(poly_mapper)
actor.GetProperty().SetAmbient(0.1)
actor.GetProperty().SetDiffuse(0.15)
actor.GetProperty().SetSpecular(0.05)
actor.GetProperty().SetSpecularPower(6)
actor.GetProperty().SetInterpolationToPhong()
actor.GetProperty().BackfaceCullingOn()
actor.GetProperty().SetOpacity(opacity)
return actor
|
Uses streamtubes to visualize polylines
Parameters
----------
lines : list
list of N curves represented as 2D ndarrays
colors : array (N, 3), list of arrays, tuple (3,), array (K,), None
If None then a standard orientation colormap is used for every line.
If one tuple of color is used. Then all streamlines will have the same
colour.
If an array (N, 3) is given, where N is equal to the number of lines.
Then every line is coloured with a different RGB color.
If a list of RGB arrays is given then every point of every line takes
a different color.
If an array (K, ) is given, where K is the number of points of all
lines then these are considered as the values to be used by the
colormap.
If an array (L, ) is given, where L is the number of streamlines then
these are considered as the values to be used by the colormap per
streamline.
If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the
colormap are interpolated automatically using trilinear interpolation.
opacity : float
Default is 1.
linewidth : float
Default is 0.01.
tube_sides : int
Default is 9.
lod : bool
Use vtkLODActor(level of detail) rather than vtkActor. Default is True.
Level of detail actors do not render the full geometry when the
frame rate is low.
lod_points : int
Number of points to be used when LOD is in effect. Default is 10000.
lod_points_size : int
Size of points when lod is in effect. Default is 3.
spline_subdiv : int
Number of splines subdivision to smooth streamtubes. Default is None.
lookup_colormap : vtkLookupTable
Add a default lookup table to the colormap. Default is None which calls
:func:`dipy.viz.actor.colormap_lookup_table`.
Examples
--------
>>> import numpy as np
>>> from dipy.viz import actor, window
>>> ren = window.Renderer()
>>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors = np.random.rand(2, 3)
>>> c = actor.streamtube(lines, colors)
>>> ren.add(c)
>>> #window.show(ren)
Notes
-----
Streamtubes can be heavy on GPU when loading many streamlines and
therefore, you may experience slow rendering time depending on system GPU.
A solution to this problem is to reduce the number of points in each
streamline. In Dipy we provide an algorithm that will reduce the number of
points on the straighter parts of the streamline but keep more points on
the curvier parts. This can be used in the following way::
from dipy.tracking.distances import approx_polygon_track
lines = [approx_polygon_track(line, 0.2) for line in lines]
Alternatively we suggest using the ``line`` actor which is much more
efficient.
See Also
--------
:func:``dipy.viz.actor.line``
|
dipy/viz/actor.py
|
streamtube
|
JohnGriffiths/dipy
| 0 |
python
|
def streamtube(lines, colors=None, opacity=1, linewidth=0.01, tube_sides=9, lod=True, lod_points=(10 ** 4), lod_points_size=3, spline_subdiv=None, lookup_colormap=None):
' Uses streamtubes to visualize polylines\n\n Parameters\n ----------\n lines : list\n list of N curves represented as 2D ndarrays\n\n colors : array (N, 3), list of arrays, tuple (3,), array (K,), None\n If None then a standard orientation colormap is used for every line.\n If one tuple of color is used. Then all streamlines will have the same\n colour.\n If an array (N, 3) is given, where N is equal to the number of lines.\n Then every line is coloured with a different RGB color.\n If a list of RGB arrays is given then every point of every line takes\n a different color.\n If an array (K, ) is given, where K is the number of points of all\n lines then these are considered as the values to be used by the\n colormap.\n If an array (L, ) is given, where L is the number of streamlines then\n these are considered as the values to be used by the colormap per\n streamline.\n If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the\n colormap are interpolated automatically using trilinear interpolation.\n\n opacity : float\n Default is 1.\n linewidth : float\n Default is 0.01.\n tube_sides : int\n Default is 9.\n lod : bool\n Use vtkLODActor(level of detail) rather than vtkActor. Default is True.\n Level of detail actors do not render the full geometry when the\n frame rate is low.\n lod_points : int\n Number of points to be used when LOD is in effect. Default is 10000.\n lod_points_size : int\n Size of points when lod is in effect. Default is 3.\n spline_subdiv : int\n Number of splines subdivision to smooth streamtubes. Default is None.\n lookup_colormap : vtkLookupTable\n Add a default lookup table to the colormap. Default is None which calls\n :func:`dipy.viz.actor.colormap_lookup_table`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from dipy.viz import actor, window\n >>> ren = window.Renderer()\n >>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]\n >>> colors = np.random.rand(2, 3)\n >>> c = actor.streamtube(lines, colors)\n >>> ren.add(c)\n >>> #window.show(ren)\n\n Notes\n -----\n Streamtubes can be heavy on GPU when loading many streamlines and\n therefore, you may experience slow rendering time depending on system GPU.\n A solution to this problem is to reduce the number of points in each\n streamline. In Dipy we provide an algorithm that will reduce the number of\n points on the straighter parts of the streamline but keep more points on\n the curvier parts. This can be used in the following way::\n\n from dipy.tracking.distances import approx_polygon_track\n lines = [approx_polygon_track(line, 0.2) for line in lines]\n\n Alternatively we suggest using the ``line`` actor which is much more\n efficient.\n\n See Also\n --------\n :func:``dipy.viz.actor.line``\n '
(poly_data, is_colormap) = lines_to_vtk_polydata(lines, colors)
next_input = poly_data
poly_normals = set_input(vtk.vtkPolyDataNormals(), next_input)
poly_normals.ComputeCellNormalsOn()
poly_normals.ComputePointNormalsOn()
poly_normals.ConsistencyOn()
poly_normals.AutoOrientNormalsOn()
poly_normals.Update()
next_input = poly_normals.GetOutputPort()
if ((spline_subdiv is not None) and (spline_subdiv > 0)):
spline_filter = set_input(vtk.vtkSplineFilter(), next_input)
spline_filter.SetSubdivideToSpecified()
spline_filter.SetNumberOfSubdivisions(spline_subdiv)
spline_filter.Update()
next_input = spline_filter.GetOutputPort()
tube_filter = set_input(vtk.vtkTubeFilter(), next_input)
tube_filter.SetNumberOfSides(tube_sides)
tube_filter.SetRadius(linewidth)
tube_filter.CappingOn()
tube_filter.Update()
next_input = tube_filter.GetOutputPort()
poly_mapper = set_input(vtk.vtkPolyDataMapper(), next_input)
poly_mapper.ScalarVisibilityOn()
poly_mapper.SetScalarModeToUsePointFieldData()
poly_mapper.SelectColorArray('Colors')
poly_mapper.GlobalImmediateModeRenderingOn()
poly_mapper.Update()
if is_colormap:
if (lookup_colormap is None):
lookup_colormap = colormap_lookup_table()
poly_mapper.SetLookupTable(lookup_colormap)
poly_mapper.UseLookupTableScalarRangeOn()
poly_mapper.Update()
if lod:
actor = vtk.vtkLODActor()
actor.SetNumberOfCloudPoints(lod_points)
actor.GetProperty().SetPointSize(lod_points_size)
else:
actor = vtk.vtkActor()
actor.SetMapper(poly_mapper)
actor.GetProperty().SetAmbient(0.1)
actor.GetProperty().SetDiffuse(0.15)
actor.GetProperty().SetSpecular(0.05)
actor.GetProperty().SetSpecularPower(6)
actor.GetProperty().SetInterpolationToPhong()
actor.GetProperty().BackfaceCullingOn()
actor.GetProperty().SetOpacity(opacity)
return actor
|
def streamtube(lines, colors=None, opacity=1, linewidth=0.01, tube_sides=9, lod=True, lod_points=(10 ** 4), lod_points_size=3, spline_subdiv=None, lookup_colormap=None):
' Uses streamtubes to visualize polylines\n\n Parameters\n ----------\n lines : list\n list of N curves represented as 2D ndarrays\n\n colors : array (N, 3), list of arrays, tuple (3,), array (K,), None\n If None then a standard orientation colormap is used for every line.\n If one tuple of color is used. Then all streamlines will have the same\n colour.\n If an array (N, 3) is given, where N is equal to the number of lines.\n Then every line is coloured with a different RGB color.\n If a list of RGB arrays is given then every point of every line takes\n a different color.\n If an array (K, ) is given, where K is the number of points of all\n lines then these are considered as the values to be used by the\n colormap.\n If an array (L, ) is given, where L is the number of streamlines then\n these are considered as the values to be used by the colormap per\n streamline.\n If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the\n colormap are interpolated automatically using trilinear interpolation.\n\n opacity : float\n Default is 1.\n linewidth : float\n Default is 0.01.\n tube_sides : int\n Default is 9.\n lod : bool\n Use vtkLODActor(level of detail) rather than vtkActor. Default is True.\n Level of detail actors do not render the full geometry when the\n frame rate is low.\n lod_points : int\n Number of points to be used when LOD is in effect. Default is 10000.\n lod_points_size : int\n Size of points when lod is in effect. Default is 3.\n spline_subdiv : int\n Number of splines subdivision to smooth streamtubes. Default is None.\n lookup_colormap : vtkLookupTable\n Add a default lookup table to the colormap. Default is None which calls\n :func:`dipy.viz.actor.colormap_lookup_table`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from dipy.viz import actor, window\n >>> ren = window.Renderer()\n >>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]\n >>> colors = np.random.rand(2, 3)\n >>> c = actor.streamtube(lines, colors)\n >>> ren.add(c)\n >>> #window.show(ren)\n\n Notes\n -----\n Streamtubes can be heavy on GPU when loading many streamlines and\n therefore, you may experience slow rendering time depending on system GPU.\n A solution to this problem is to reduce the number of points in each\n streamline. In Dipy we provide an algorithm that will reduce the number of\n points on the straighter parts of the streamline but keep more points on\n the curvier parts. This can be used in the following way::\n\n from dipy.tracking.distances import approx_polygon_track\n lines = [approx_polygon_track(line, 0.2) for line in lines]\n\n Alternatively we suggest using the ``line`` actor which is much more\n efficient.\n\n See Also\n --------\n :func:``dipy.viz.actor.line``\n '
(poly_data, is_colormap) = lines_to_vtk_polydata(lines, colors)
next_input = poly_data
poly_normals = set_input(vtk.vtkPolyDataNormals(), next_input)
poly_normals.ComputeCellNormalsOn()
poly_normals.ComputePointNormalsOn()
poly_normals.ConsistencyOn()
poly_normals.AutoOrientNormalsOn()
poly_normals.Update()
next_input = poly_normals.GetOutputPort()
if ((spline_subdiv is not None) and (spline_subdiv > 0)):
spline_filter = set_input(vtk.vtkSplineFilter(), next_input)
spline_filter.SetSubdivideToSpecified()
spline_filter.SetNumberOfSubdivisions(spline_subdiv)
spline_filter.Update()
next_input = spline_filter.GetOutputPort()
tube_filter = set_input(vtk.vtkTubeFilter(), next_input)
tube_filter.SetNumberOfSides(tube_sides)
tube_filter.SetRadius(linewidth)
tube_filter.CappingOn()
tube_filter.Update()
next_input = tube_filter.GetOutputPort()
poly_mapper = set_input(vtk.vtkPolyDataMapper(), next_input)
poly_mapper.ScalarVisibilityOn()
poly_mapper.SetScalarModeToUsePointFieldData()
poly_mapper.SelectColorArray('Colors')
poly_mapper.GlobalImmediateModeRenderingOn()
poly_mapper.Update()
if is_colormap:
if (lookup_colormap is None):
lookup_colormap = colormap_lookup_table()
poly_mapper.SetLookupTable(lookup_colormap)
poly_mapper.UseLookupTableScalarRangeOn()
poly_mapper.Update()
if lod:
actor = vtk.vtkLODActor()
actor.SetNumberOfCloudPoints(lod_points)
actor.GetProperty().SetPointSize(lod_points_size)
else:
actor = vtk.vtkActor()
actor.SetMapper(poly_mapper)
actor.GetProperty().SetAmbient(0.1)
actor.GetProperty().SetDiffuse(0.15)
actor.GetProperty().SetSpecular(0.05)
actor.GetProperty().SetSpecularPower(6)
actor.GetProperty().SetInterpolationToPhong()
actor.GetProperty().BackfaceCullingOn()
actor.GetProperty().SetOpacity(opacity)
return actor<|docstring|>Uses streamtubes to visualize polylines
Parameters
----------
lines : list
list of N curves represented as 2D ndarrays
colors : array (N, 3), list of arrays, tuple (3,), array (K,), None
If None then a standard orientation colormap is used for every line.
If one tuple of color is used. Then all streamlines will have the same
colour.
If an array (N, 3) is given, where N is equal to the number of lines.
Then every line is coloured with a different RGB color.
If a list of RGB arrays is given then every point of every line takes
a different color.
If an array (K, ) is given, where K is the number of points of all
lines then these are considered as the values to be used by the
colormap.
If an array (L, ) is given, where L is the number of streamlines then
these are considered as the values to be used by the colormap per
streamline.
If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the
colormap are interpolated automatically using trilinear interpolation.
opacity : float
Default is 1.
linewidth : float
Default is 0.01.
tube_sides : int
Default is 9.
lod : bool
Use vtkLODActor(level of detail) rather than vtkActor. Default is True.
Level of detail actors do not render the full geometry when the
frame rate is low.
lod_points : int
Number of points to be used when LOD is in effect. Default is 10000.
lod_points_size : int
Size of points when lod is in effect. Default is 3.
spline_subdiv : int
Number of splines subdivision to smooth streamtubes. Default is None.
lookup_colormap : vtkLookupTable
Add a default lookup table to the colormap. Default is None which calls
:func:`dipy.viz.actor.colormap_lookup_table`.
Examples
--------
>>> import numpy as np
>>> from dipy.viz import actor, window
>>> ren = window.Renderer()
>>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors = np.random.rand(2, 3)
>>> c = actor.streamtube(lines, colors)
>>> ren.add(c)
>>> #window.show(ren)
Notes
-----
Streamtubes can be heavy on GPU when loading many streamlines and
therefore, you may experience slow rendering time depending on system GPU.
A solution to this problem is to reduce the number of points in each
streamline. In Dipy we provide an algorithm that will reduce the number of
points on the straighter parts of the streamline but keep more points on
the curvier parts. This can be used in the following way::
from dipy.tracking.distances import approx_polygon_track
lines = [approx_polygon_track(line, 0.2) for line in lines]
Alternatively we suggest using the ``line`` actor which is much more
efficient.
See Also
--------
:func:``dipy.viz.actor.line``<|endoftext|>
|
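Putting the Notes and Examples of the streamtube row together: downsample each streamline with `approx_polygon_track` before building tubes, which keeps GPU load manageable for large bundles.

```python
# Sketch combining the row's Notes and Examples (same imports as the docstring).
import numpy as np
from dipy.tracking.distances import approx_polygon_track
from dipy.viz import actor, window

lines = [np.cumsum(np.random.rand(100, 3), axis=0).astype('f4') for _ in range(20)]
lines = [approx_polygon_track(line, 0.2) for line in lines]  # fewer points on straight runs
tubes = actor.streamtube(lines, colors=(1, 0, 0), linewidth=0.05)
ren = window.Renderer()
ren.add(tubes)
# window.show(ren)
```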
0fe8e9c2e842f8853b9698eb85188f448b0c29f9b4fb1093ffaa16964f9d8f29
|
def line(lines, colors=None, opacity=1, linewidth=1, spline_subdiv=None, lod=True, lod_points=(10 ** 4), lod_points_size=3, lookup_colormap=None):
' Create an actor for one or more lines.\n\n Parameters\n ------------\n lines : list of arrays\n\n colors : array (N, 3), list of arrays, tuple (3,), array (K,), None\n If None then a standard orientation colormap is used for every line.\n If one tuple of color is used. Then all streamlines will have the same\n colour.\n If an array (N, 3) is given, where N is equal to the number of lines.\n Then every line is coloured with a different RGB color.\n If a list of RGB arrays is given then every point of every line takes\n a different color.\n If an array (K, ) is given, where K is the number of points of all\n lines then these are considered as the values to be used by the\n colormap.\n If an array (L, ) is given, where L is the number of streamlines then\n these are considered as the values to be used by the colormap per\n streamline.\n If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the\n colormap are interpolated automatically using trilinear interpolation.\n\n opacity : float, optional\n Default is 1.\n\n linewidth : float, optional\n Line thickness. Default is 1.\n spline_subdiv : int, optional\n Number of splines subdivision to smooth streamtubes. Default is None\n which means no subdivision.\n lod : bool\n Use vtkLODActor(level of detail) rather than vtkActor. Default is True.\n Level of detail actors do not render the full geometry when the\n frame rate is low.\n lod_points : int\n Number of points to be used when LOD is in effect. Default is 10000.\n lod_points_size : int\n Size of points when lod is in effect. Default is 3.\n lookup_colormap : bool, optional\n Add a default lookup table to the colormap. Default is None which calls\n :func:`dipy.viz.actor.colormap_lookup_table`.\n\n Returns\n ----------\n v : vtkActor or vtkLODActor object\n Line.\n\n Examples\n ----------\n >>> from dipy.viz import actor, window\n >>> ren = window.Renderer()\n >>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]\n >>> colors = np.random.rand(2, 3)\n >>> c = actor.line(lines, colors)\n >>> ren.add(c)\n >>> #window.show(ren)\n '
(poly_data, is_colormap) = lines_to_vtk_polydata(lines, colors)
next_input = poly_data
if ((spline_subdiv is not None) and (spline_subdiv > 0)):
spline_filter = set_input(vtk.vtkSplineFilter(), next_input)
spline_filter.SetSubdivideToSpecified()
spline_filter.SetNumberOfSubdivisions(spline_subdiv)
spline_filter.Update()
next_input = spline_filter.GetOutputPort()
poly_mapper = set_input(vtk.vtkPolyDataMapper(), next_input)
poly_mapper.ScalarVisibilityOn()
poly_mapper.SetScalarModeToUsePointFieldData()
poly_mapper.SelectColorArray('Colors')
poly_mapper.Update()
if is_colormap:
if (lookup_colormap is None):
lookup_colormap = colormap_lookup_table()
poly_mapper.SetLookupTable(lookup_colormap)
poly_mapper.UseLookupTableScalarRangeOn()
poly_mapper.Update()
if lod:
actor = vtk.vtkLODActor()
actor.SetNumberOfCloudPoints(lod_points)
actor.GetProperty().SetPointSize(lod_points_size)
else:
actor = vtk.vtkActor()
actor.SetMapper(poly_mapper)
actor.GetProperty().SetLineWidth(linewidth)
actor.GetProperty().SetOpacity(opacity)
return actor
|
Create an actor for one or more lines.
Parameters
------------
lines : list of arrays
colors : array (N, 3), list of arrays, tuple (3,), array (K,), None
If None then a standard orientation colormap is used for every line.
If one tuple of color is used. Then all streamlines will have the same
colour.
If an array (N, 3) is given, where N is equal to the number of lines.
Then every line is coloured with a different RGB color.
If a list of RGB arrays is given then every point of every line takes
a different color.
If an array (K, ) is given, where K is the number of points of all
lines then these are considered as the values to be used by the
colormap.
If an array (L, ) is given, where L is the number of streamlines then
these are considered as the values to be used by the colormap per
streamline.
If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the
colormap are interpolated automatically using trilinear interpolation.
opacity : float, optional
Default is 1.
linewidth : float, optional
Line thickness. Default is 1.
spline_subdiv : int, optional
Number of splines subdivision to smooth streamtubes. Default is None
which means no subdivision.
lod : bool
Use vtkLODActor(level of detail) rather than vtkActor. Default is True.
Level of detail actors do not render the full geometry when the
frame rate is low.
lod_points : int
Number of points to be used when LOD is in effect. Default is 10000.
lod_points_size : int
Size of points when lod is in effect. Default is 3.
lookup_colormap : bool, optional
Add a default lookup table to the colormap. Default is None which calls
:func:`dipy.viz.actor.colormap_lookup_table`.
Returns
----------
v : vtkActor or vtkLODActor object
Line.
Examples
----------
>>> from dipy.viz import actor, window
>>> ren = window.Renderer()
>>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors = np.random.rand(2, 3)
>>> c = actor.line(lines, colors)
>>> ren.add(c)
>>> #window.show(ren)
|
dipy/viz/actor.py
|
line
|
JohnGriffiths/dipy
| 0 |
python
|
def line(lines, colors=None, opacity=1, linewidth=1, spline_subdiv=None, lod=True, lod_points=(10 ** 4), lod_points_size=3, lookup_colormap=None):
' Create an actor for one or more lines.\n\n Parameters\n ------------\n lines : list of arrays\n\n colors : array (N, 3), list of arrays, tuple (3,), array (K,), None\n If None then a standard orientation colormap is used for every line.\n If one tuple of color is used. Then all streamlines will have the same\n colour.\n If an array (N, 3) is given, where N is equal to the number of lines.\n Then every line is coloured with a different RGB color.\n If a list of RGB arrays is given then every point of every line takes\n a different color.\n If an array (K, ) is given, where K is the number of points of all\n lines then these are considered as the values to be used by the\n colormap.\n If an array (L, ) is given, where L is the number of streamlines then\n these are considered as the values to be used by the colormap per\n streamline.\n If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the\n colormap are interpolated automatically using trilinear interpolation.\n\n opacity : float, optional\n Default is 1.\n\n linewidth : float, optional\n Line thickness. Default is 1.\n spline_subdiv : int, optional\n Number of splines subdivision to smooth streamtubes. Default is None\n which means no subdivision.\n lod : bool\n Use vtkLODActor(level of detail) rather than vtkActor. Default is True.\n Level of detail actors do not render the full geometry when the\n frame rate is low.\n lod_points : int\n Number of points to be used when LOD is in effect. Default is 10000.\n lod_points_size : int\n Size of points when lod is in effect. Default is 3.\n lookup_colormap : bool, optional\n Add a default lookup table to the colormap. Default is None which calls\n :func:`dipy.viz.actor.colormap_lookup_table`.\n\n Returns\n ----------\n v : vtkActor or vtkLODActor object\n Line.\n\n Examples\n ----------\n >>> from dipy.viz import actor, window\n >>> ren = window.Renderer()\n >>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]\n >>> colors = np.random.rand(2, 3)\n >>> c = actor.line(lines, colors)\n >>> ren.add(c)\n >>> #window.show(ren)\n '
(poly_data, is_colormap) = lines_to_vtk_polydata(lines, colors)
next_input = poly_data
if ((spline_subdiv is not None) and (spline_subdiv > 0)):
spline_filter = set_input(vtk.vtkSplineFilter(), next_input)
spline_filter.SetSubdivideToSpecified()
spline_filter.SetNumberOfSubdivisions(spline_subdiv)
spline_filter.Update()
next_input = spline_filter.GetOutputPort()
poly_mapper = set_input(vtk.vtkPolyDataMapper(), next_input)
poly_mapper.ScalarVisibilityOn()
poly_mapper.SetScalarModeToUsePointFieldData()
poly_mapper.SelectColorArray('Colors')
poly_mapper.Update()
if is_colormap:
if (lookup_colormap is None):
lookup_colormap = colormap_lookup_table()
poly_mapper.SetLookupTable(lookup_colormap)
poly_mapper.UseLookupTableScalarRangeOn()
poly_mapper.Update()
if lod:
actor = vtk.vtkLODActor()
actor.SetNumberOfCloudPoints(lod_points)
actor.GetProperty().SetPointSize(lod_points_size)
else:
actor = vtk.vtkActor()
actor.SetMapper(poly_mapper)
actor.GetProperty().SetLineWidth(linewidth)
actor.GetProperty().SetOpacity(opacity)
return actor
|
def line(lines, colors=None, opacity=1, linewidth=1, spline_subdiv=None, lod=True, lod_points=(10 ** 4), lod_points_size=3, lookup_colormap=None):
' Create an actor for one or more lines.\n\n Parameters\n ------------\n lines : list of arrays\n\n colors : array (N, 3), list of arrays, tuple (3,), array (K,), None\n If None then a standard orientation colormap is used for every line.\n If one tuple of color is used. Then all streamlines will have the same\n colour.\n If an array (N, 3) is given, where N is equal to the number of lines.\n Then every line is coloured with a different RGB color.\n If a list of RGB arrays is given then every point of every line takes\n a different color.\n If an array (K, ) is given, where K is the number of points of all\n lines then these are considered as the values to be used by the\n colormap.\n If an array (L, ) is given, where L is the number of streamlines then\n these are considered as the values to be used by the colormap per\n streamline.\n If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the\n colormap are interpolated automatically using trilinear interpolation.\n\n opacity : float, optional\n Default is 1.\n\n linewidth : float, optional\n Line thickness. Default is 1.\n spline_subdiv : int, optional\n Number of splines subdivision to smooth streamtubes. Default is None\n which means no subdivision.\n lod : bool\n Use vtkLODActor(level of detail) rather than vtkActor. Default is True.\n Level of detail actors do not render the full geometry when the\n frame rate is low.\n lod_points : int\n Number of points to be used when LOD is in effect. Default is 10000.\n lod_points_size : int\n Size of points when lod is in effect. Default is 3.\n lookup_colormap : bool, optional\n Add a default lookup table to the colormap. Default is None which calls\n :func:`dipy.viz.actor.colormap_lookup_table`.\n\n Returns\n ----------\n v : vtkActor or vtkLODActor object\n Line.\n\n Examples\n ----------\n >>> from dipy.viz import actor, window\n >>> ren = window.Renderer()\n >>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]\n >>> colors = np.random.rand(2, 3)\n >>> c = actor.line(lines, colors)\n >>> ren.add(c)\n >>> #window.show(ren)\n '
(poly_data, is_colormap) = lines_to_vtk_polydata(lines, colors)
next_input = poly_data
if ((spline_subdiv is not None) and (spline_subdiv > 0)):
spline_filter = set_input(vtk.vtkSplineFilter(), next_input)
spline_filter.SetSubdivideToSpecified()
spline_filter.SetNumberOfSubdivisions(spline_subdiv)
spline_filter.Update()
next_input = spline_filter.GetOutputPort()
poly_mapper = set_input(vtk.vtkPolyDataMapper(), next_input)
poly_mapper.ScalarVisibilityOn()
poly_mapper.SetScalarModeToUsePointFieldData()
poly_mapper.SelectColorArray('Colors')
poly_mapper.Update()
if is_colormap:
if (lookup_colormap is None):
lookup_colormap = colormap_lookup_table()
poly_mapper.SetLookupTable(lookup_colormap)
poly_mapper.UseLookupTableScalarRangeOn()
poly_mapper.Update()
if lod:
actor = vtk.vtkLODActor()
actor.SetNumberOfCloudPoints(lod_points)
actor.GetProperty().SetPointSize(lod_points_size)
else:
actor = vtk.vtkActor()
actor.SetMapper(poly_mapper)
actor.GetProperty().SetLineWidth(linewidth)
actor.GetProperty().SetOpacity(opacity)
return actor<|docstring|>Create an actor for one or more lines.
Parameters
------------
lines : list of arrays
colors : array (N, 3), list of arrays, tuple (3,), array (K,), None
If None then a standard orientation colormap is used for every line.
If a single colour tuple is given, then all streamlines will have the
same colour.
If an array (N, 3) is given, where N is equal to the number of lines,
then every line is coloured with a different RGB color.
If a list of RGB arrays is given then every point of every line takes
a different color.
If an array (K, ) is given, where K is the number of points of all
lines then these are considered as the values to be used by the
colormap.
If an array (L, ) is given, where L is the number of streamlines then
these are considered as the values to be used by the colormap per
streamline.
If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the
colormap are interpolated automatically using trilinear interpolation.
opacity : float, optional
Default is 1.
linewidth : float, optional
Line thickness. Default is 1.
spline_subdiv : int, optional
Number of spline subdivisions used to smooth streamtubes. Default is None
which means no subdivision.
lod : bool
Use vtkLODActor (level of detail) rather than vtkActor. Default is True.
Level of detail actors do not render the full geometry when the
frame rate is low.
lod_points : int
Number of points to be used when LOD is in effect. Default is 10000.
lod_points_size : int
Size of points when lod is in effect. Default is 3.
lookup_colormap : vtkLookupTable, optional
Add a default lookup table to the colormap. Default is None which calls
:func:`dipy.viz.actor.colormap_lookup_table`.
Returns
----------
v : vtkActor or vtkLODActor object
Line.
Examples
----------
>>> from dipy.viz import actor, window
>>> ren = window.Renderer()
>>> lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors = np.random.rand(2, 3)
>>> c = actor.line(lines, colors)
>>> ren.add(c)
>>> #window.show(ren)<|endoftext|>
|
359453b52d619369559c9fba31b913dc6ad3253b383e144f30005332b9b607d5
|
def scalar_bar(lookup_table=None, title=' '):
' Default scalar bar actor for a given colormap (colorbar)\n\n Parameters\n ----------\n lookup_table : vtkLookupTable or None\n If None then ``colormap_lookup_table`` is called with default options.\n title : str\n\n Returns\n -------\n scalar_bar : vtkScalarBarActor\n\n See Also\n --------\n :func:`dipy.viz.actor.colormap_lookup_table`\n\n '
lookup_table_copy = vtk.vtkLookupTable()
if (lookup_table is None):
lookup_table = colormap_lookup_table()
lookup_table_copy.DeepCopy(lookup_table)
scalar_bar = vtk.vtkScalarBarActor()
scalar_bar.SetTitle(title)
scalar_bar.SetLookupTable(lookup_table_copy)
scalar_bar.SetNumberOfLabels(6)
return scalar_bar
|
Default scalar bar actor for a given colormap (colorbar)
Parameters
----------
lookup_table : vtkLookupTable or None
If None then ``colormap_lookup_table`` is called with default options.
title : str
Returns
-------
scalar_bar : vtkScalarBarActor
See Also
--------
:func:`dipy.viz.actor.colormap_lookup_table`
|
dipy/viz/actor.py
|
scalar_bar
|
JohnGriffiths/dipy
| 0 |
python
|
def scalar_bar(lookup_table=None, title=' '):
' Default scalar bar actor for a given colormap (colorbar)\n\n Parameters\n ----------\n lookup_table : vtkLookupTable or None\n If None then ``colormap_lookup_table`` is called with default options.\n title : str\n\n Returns\n -------\n scalar_bar : vtkScalarBarActor\n\n See Also\n --------\n :func:`dipy.viz.actor.colormap_lookup_table`\n\n '
lookup_table_copy = vtk.vtkLookupTable()
if (lookup_table is None):
lookup_table = colormap_lookup_table()
lookup_table_copy.DeepCopy(lookup_table)
scalar_bar = vtk.vtkScalarBarActor()
scalar_bar.SetTitle(title)
scalar_bar.SetLookupTable(lookup_table_copy)
scalar_bar.SetNumberOfLabels(6)
return scalar_bar
|
def scalar_bar(lookup_table=None, title=' '):
' Default scalar bar actor for a given colormap (colorbar)\n\n Parameters\n ----------\n lookup_table : vtkLookupTable or None\n If None then ``colormap_lookup_table`` is called with default options.\n title : str\n\n Returns\n -------\n scalar_bar : vtkScalarBarActor\n\n See Also\n --------\n :func:`dipy.viz.actor.colormap_lookup_table`\n\n '
lookup_table_copy = vtk.vtkLookupTable()
if (lookup_table is None):
lookup_table = colormap_lookup_table()
lookup_table_copy.DeepCopy(lookup_table)
scalar_bar = vtk.vtkScalarBarActor()
scalar_bar.SetTitle(title)
scalar_bar.SetLookupTable(lookup_table_copy)
scalar_bar.SetNumberOfLabels(6)
return scalar_bar<|docstring|>Default scalar bar actor for a given colormap (colorbar)
Parameters
----------
lookup_table : vtkLookupTable or None
If None then ``colormap_lookup_table`` is called with default options.
title : str
Returns
-------
scalar_bar : vtkScalarBarActor
See Also
--------
:func:`dipy.viz.actor.colormap_lookup_table`<|endoftext|>
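A minimal usage sketch for scalar_bar(), assuming the same dipy.viz imports used in the line() doctest above; the title string is purely illustrative:

from dipy.viz import actor, window

ren = window.Renderer()
lut = actor.colormap_lookup_table()                   # default lookup table
bar = actor.scalar_bar(lookup_table=lut, title='FA')  # 'FA' is just an example title
ren.add(bar)
# window.show(ren)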
|
c51490a627b70cd2f3a1090fad2e259d9f772c5363db7afd5d42ba1ad0584f27
|
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
' Internal function for generating arrow actors.\n '
arrow = vtk.vtkArrowSource()
arrowm = vtk.vtkPolyDataMapper()
if (major_version <= 5):
arrowm.SetInput(arrow.GetOutput())
else:
arrowm.SetInputConnection(arrow.GetOutputPort())
arrowa = vtk.vtkActor()
arrowa.SetMapper(arrowm)
arrowa.GetProperty().SetColor(color)
arrowa.GetProperty().SetOpacity(opacity)
arrowa.SetScale(scale)
return arrowa
|
Internal function for generating arrow actors.
|
dipy/viz/actor.py
|
_arrow
|
JohnGriffiths/dipy
| 0 |
python
|
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
' \n '
arrow = vtk.vtkArrowSource()
arrowm = vtk.vtkPolyDataMapper()
if (major_version <= 5):
arrowm.SetInput(arrow.GetOutput())
else:
arrowm.SetInputConnection(arrow.GetOutputPort())
arrowa = vtk.vtkActor()
arrowa.SetMapper(arrowm)
arrowa.GetProperty().SetColor(color)
arrowa.GetProperty().SetOpacity(opacity)
arrowa.SetScale(scale)
return arrowa
|
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
' \n '
arrow = vtk.vtkArrowSource()
arrowm = vtk.vtkPolyDataMapper()
if (major_version <= 5):
arrowm.SetInput(arrow.GetOutput())
else:
arrowm.SetInputConnection(arrow.GetOutputPort())
arrowa = vtk.vtkActor()
arrowa.SetMapper(arrowm)
arrowa.GetProperty().SetColor(color)
arrowa.GetProperty().SetOpacity(opacity)
arrowa.SetScale(scale)
return arrowa<|docstring|>Internal function for generating arrow actors.<|endoftext|>
|
d31227902fac7cdc85fa5e886bdb3fbc8c4eb5f410a897737fde9a89a5545364
|
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1), opacity=1):
" Create an actor with the coordinate's system axes where\n red = x, green = y, blue = z.\n\n Parameters\n ----------\n scale : tuple (3,)\n Axes size e.g. (100, 100, 100). Default is (1, 1, 1).\n colorx : tuple (3,)\n x-axis color. Default red (1, 0, 0).\n colory : tuple (3,)\n y-axis color. Default green (0, 1, 0).\n colorz : tuple (3,)\n z-axis color. Default blue (0, 0, 1).\n\n Returns\n -------\n vtkAssembly\n "
arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
arrowy.RotateZ(90)
arrowz.RotateY((- 90))
ass = vtk.vtkAssembly()
ass.AddPart(arrowx)
ass.AddPart(arrowy)
ass.AddPart(arrowz)
return ass
|
Create an actor with the coordinate system's axes, where
red = x, green = y, blue = z.
Parameters
----------
scale : tuple (3,)
Axes size e.g. (100, 100, 100). Default is (1, 1, 1).
colorx : tuple (3,)
x-axis color. Default red (1, 0, 0).
colory : tuple (3,)
y-axis color. Default green (0, 1, 0).
colorz : tuple (3,)
z-axis color. Default blue (0, 0, 1).
Returns
-------
vtkAssembly
|
dipy/viz/actor.py
|
axes
|
JohnGriffiths/dipy
| 0 |
python
|
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1), opacity=1):
" Create an actor with the coordinate's system axes where\n red = x, green = y, blue = z.\n\n Parameters\n ----------\n scale : tuple (3,)\n Axes size e.g. (100, 100, 100). Default is (1, 1, 1).\n colorx : tuple (3,)\n x-axis color. Default red (1, 0, 0).\n colory : tuple (3,)\n y-axis color. Default green (0, 1, 0).\n colorz : tuple (3,)\n z-axis color. Default blue (0, 0, 1).\n\n Returns\n -------\n vtkAssembly\n "
arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
arrowy.RotateZ(90)
arrowz.RotateY((- 90))
ass = vtk.vtkAssembly()
ass.AddPart(arrowx)
ass.AddPart(arrowy)
ass.AddPart(arrowz)
return ass
|
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1), opacity=1):
" Create an actor with the coordinate's system axes where\n red = x, green = y, blue = z.\n\n Parameters\n ----------\n scale : tuple (3,)\n Axes size e.g. (100, 100, 100). Default is (1, 1, 1).\n colorx : tuple (3,)\n x-axis color. Default red (1, 0, 0).\n colory : tuple (3,)\n y-axis color. Default green (0, 1, 0).\n colorz : tuple (3,)\n z-axis color. Default blue (0, 0, 1).\n\n Returns\n -------\n vtkAssembly\n "
arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
arrowy.RotateZ(90)
arrowz.RotateY((- 90))
ass = vtk.vtkAssembly()
ass.AddPart(arrowx)
ass.AddPart(arrowy)
ass.AddPart(arrowz)
return ass<|docstring|>Create an actor with the coordinate system's axes, where
red = x, green = y, blue = z.
Parameters
----------
scale : tuple (3,)
Axes size e.g. (100, 100, 100). Default is (1, 1, 1).
colorx : tuple (3,)
x-axis color. Default red (1, 0, 0).
colory : tuple (3,)
y-axis color. Default green (0, 1, 0).
colorz : tuple (3,)
z-axis color. Default blue (0, 0, 1).
Returns
-------
vtkAssembly<|endoftext|>
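And a short sketch for axes(), following the same renderer pattern; the scale and opacity values are arbitrary:

from dipy.viz import actor, window

ren = window.Renderer()
ax = actor.axes(scale=(10, 10, 10), opacity=0.8)  # red = x, green = y, blue = z
ren.add(ax)
# window.show(ren)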
|
e30f1e9f9270b004572ad51d7b225d818cb6117d9108985e2e9ef57055d61411
|
def __init__(self, settings, ui_id, job_id):
'\n Initialises the slurm scheduler class for Bilby\n\n :param settings: The settings from settings.py\n :param ui_id: The UI id of the job\n :param job_id: The Slurm id of the Job\n '
super().__init__(settings, ui_id, job_id)
self.slurm_template = 'settings/bilby_slurm.sh'
self.nodes = 1
self.tasks_per_node = 1
self.memory = 4096
self.walltime = ((60 * 60) * 24)
self.job_name = ('bilby_' + str(uuid.uuid4()))
self.job_parameter_file = os.path.join(self.get_working_directory(), 'json_params.json')
self.job_output_directory = os.path.join(self.get_working_directory(), 'output')
|
Initialises the slurm scheduler class for Bilby
:param settings: The settings from settings.py
:param ui_id: The UI id of the job
:param job_id: The Slurm id of the Job
|
misc/job_controller_scripts/slurm/bilby_slurm.py
|
__init__
|
ASVO-TAO/SS18B-PLasky
| 0 |
python
|
def __init__(self, settings, ui_id, job_id):
'\n Initialises the slurm scheduler class for Bilby\n\n :param settings: The settings from settings.py\n :param ui_id: The UI id of the job\n :param job_id: The Slurm id of the Job\n '
super().__init__(settings, ui_id, job_id)
self.slurm_template = 'settings/bilby_slurm.sh'
self.nodes = 1
self.tasks_per_node = 1
self.memory = 4096
self.walltime = ((60 * 60) * 24)
self.job_name = ('bilby_' + str(uuid.uuid4()))
self.job_parameter_file = os.path.join(self.get_working_directory(), 'json_params.json')
self.job_output_directory = os.path.join(self.get_working_directory(), 'output')
|
def __init__(self, settings, ui_id, job_id):
'\n Initialises the slurm scheduler class for Bilby\n\n :param settings: The settings from settings.py\n :param ui_id: The UI id of the job\n :param job_id: The Slurm id of the Job\n '
super().__init__(settings, ui_id, job_id)
self.slurm_template = 'settings/bilby_slurm.sh'
self.nodes = 1
self.tasks_per_node = 1
self.memory = 4096
self.walltime = ((60 * 60) * 24)
self.job_name = ('bilby_' + str(uuid.uuid4()))
self.job_parameter_file = os.path.join(self.get_working_directory(), 'json_params.json')
self.job_output_directory = os.path.join(self.get_working_directory(), 'output')<|docstring|>Initialises the slurm scheduler class for Bilby
:param settings: The settings from settings.py
:param ui_id: The UI id of the job
:param job_id: The Slurm id of the Job<|endoftext|>
|
a08b4cdd6586afad4591ecc8f2c00aced1c55658b863cbd052d354473a5f2ce9
|
def generate_template_dict(self):
'\n Called before a job is submitted, just before the slurm script is written\n\n We add in our custom slurm arguments\n\n :return: A dict of key/value pairs used in the slurm script template\n '
params = super().generate_template_dict()
params['job_parameter_file'] = self.job_parameter_file
params['job_output_directory'] = self.job_output_directory
return params
|
Called before a job is submitted, just before the slurm script is written
We add in our custom slurm arguments
:return: A dict of key/value pairs used in the slurm script template
|
misc/job_controller_scripts/slurm/bilby_slurm.py
|
generate_template_dict
|
ASVO-TAO/SS18B-PLasky
| 0 |
python
|
def generate_template_dict(self):
'\n Called before a job is submitted, just before the slurm script is written\n\n We add in our custom slurm arguments\n\n :return: A dict of key/value pairs used in the slurm script template\n '
params = super().generate_template_dict()
params['job_parameter_file'] = self.job_parameter_file
params['job_output_directory'] = self.job_output_directory
return params
|
def generate_template_dict(self):
'\n Called before a job is submitted, just before the slurm script is written\n\n We add in our custom slurm arguments\n\n :return: A dict of key/value pairs used in the slurm script template\n '
params = super().generate_template_dict()
params['job_parameter_file'] = self.job_parameter_file
params['job_output_directory'] = self.job_output_directory
return params<|docstring|>Called before a job is submitted before writing the slurm script
We add in our custom slurm arguments
:return: A dict of key/value pairs used in the slurm script template<|endoftext|>
|
9f6cbbf7cf1ea7dd150179128009e270a204002b9e1f3ff6fd881cb8d4d8e436
|
def submit(self, job_parameters):
'\n Called when a job is submitted\n\n :param job_parameters: The parameters for this job, this is a string representing a json dump\n :return: The super call return to submit\n '
job_parameters = json.loads(job_parameters)
job_parameters['name'] = 'bilby'
json.dump(job_parameters, open(self.job_parameter_file, 'w'))
return super().submit(job_parameters)
|
Called when a job is submitted
:param job_parameters: The parameters for this job, this is a string representing a json dump
:return: The super call return to submit
|
misc/job_controller_scripts/slurm/bilby_slurm.py
|
submit
|
ASVO-TAO/SS18B-PLasky
| 0 |
python
|
def submit(self, job_parameters):
'\n Called when a job is submitted\n\n :param job_parameters: The parameters for this job, this is a string representing a json dump\n :return: The super call return to submit\n '
job_parameters = json.loads(job_parameters)
job_parameters['name'] = 'bilby'
json.dump(job_parameters, open(self.job_parameter_file, 'w'))
return super().submit(job_parameters)
|
def submit(self, job_parameters):
'\n Called when a job is submitted\n\n :param job_parameters: The parameters for this job, this is a string representing a json dump\n :return: The super call return to submit\n '
job_parameters = json.loads(job_parameters)
job_parameters['name'] = 'bilby'
json.dump(job_parameters, open(self.job_parameter_file, 'w'))
return super().submit(job_parameters)<|docstring|>Called when a job is submitted
:param job_parameters: The parameters for this job, this is a string representing a json dump
:return: The super call return to submit<|endoftext|>
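The three bilby_slurm methods above fit together roughly as follows. This is a hedged sketch: the class name BilbySlurm is inferred from the file name, and settings, ui_id, job_id and the parameter payload are placeholders:

import json

# Hypothetical driver; `settings` comes from settings.py in the real deployment.
scheduler = BilbySlurm(settings, ui_id=42, job_id=1001)  # illustrative ids
params = json.dumps({'trigger_time': 1126259462.4})      # illustrative payload
scheduler.submit(params)  # writes json_params.json, then defers to the base submit()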
|
1fc044d117f562172c53044908c5023f73284bc39d9c6615996c656a425006fb
|
def load_blend_results(path, survey):
'Load results exported from a DrawBlendsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the DrawBlendsGenerator to save\n the files.\n survey (str): Name of the survey for which you want to load the files.\n\n Returns:\n Dictionary containing the blend images, the isolated images and the\n information about the blends.\n '
blend_images = np.load(os.path.join(path, survey, 'blended.npy'), allow_pickle=True)
isolated_images = np.load(os.path.join(path, survey, 'isolated.npy'), allow_pickle=True)
blend_list = [Table.read(os.path.join(path, survey, f'blend_info_{i}'), format='ascii') for i in range(blend_images.shape[0])]
return {'blend_images': blend_images, 'isolated_images': isolated_images, 'blend_list': blend_list}
|
Load results exported from a DrawBlendsGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the DrawBlendsGenerator to save
the files.
survey (str): Name of the survey for which you want to load the files.
Returns:
Dictionary containing the blend images, the isolated images and the
information about the blends.
|
btk/utils.py
|
load_blend_results
|
b-biswas/BlendingToolKit
| 16 |
python
|
def load_blend_results(path, survey):
'Load results exported from a DrawBlendsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the DrawBlendsGenerator to save\n the files.\n survey (str): Name of the survey for which you want to load the files.\n\n Returns:\n Dictionary containing the blend images, the isolated images and the\n information about the blends.\n '
blend_images = np.load(os.path.join(path, survey, 'blended.npy'), allow_pickle=True)
isolated_images = np.load(os.path.join(path, survey, 'isolated.npy'), allow_pickle=True)
blend_list = [Table.read(os.path.join(path, survey, f'blend_info_{i}'), format='ascii') for i in range(blend_images.shape[0])]
return {'blend_images': blend_images, 'isolated_images': isolated_images, 'blend_list': blend_list}
|
def load_blend_results(path, survey):
'Load results exported from a DrawBlendsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the DrawBlendsGenerator to save\n the files.\n survey (str): Name of the survey for which you want to load the files.\n\n Returns:\n Dictionary containing the blend images, the isolated images and the\n information about the blends.\n '
blend_images = np.load(os.path.join(path, survey, 'blended.npy'), allow_pickle=True)
isolated_images = np.load(os.path.join(path, survey, 'isolated.npy'), allow_pickle=True)
blend_list = [Table.read(os.path.join(path, survey, f'blend_info_{i}'), format='ascii') for i in range(blend_images.shape[0])]
return {'blend_images': blend_images, 'isolated_images': isolated_images, 'blend_list': blend_list}<|docstring|>Load results exported from a DrawBlendsGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the DrawBlendsGenerator to save
the files.
survey (str): Name of the survey for which you want to load the files.
Returns:
Dictionary containing the blend images, the isolated images and the
information about the blends.<|endoftext|>
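A usage sketch for the loader above; the path and survey name are illustrative and must match what was passed to the DrawBlendsGenerator:

results = load_blend_results('/tmp/btk_output', 'LSST')  # hypothetical save_path and survey
blends = results['blend_images']                          # blended scenes
isolated = results['isolated_images']                     # per-galaxy isolated stamps
print(len(results['blend_list']), 'blend tables loaded')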
|
ecdb6a57fa48a5ababd9975b992ad621d4e81ad9d1cc2c1e9d5b2e2d44fa0c86
|
def load_measure_results(path, measure_name, n_batch):
'Load results exported from a MeasureGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MeasureGenerator to save\n the files.\n measure_name (str): Name of the measure function for which you\n want to load the files\n n_batch (int): Number of blends in the batch you want to load\n\n Returns:\n Dictionary containing the detection catalogs, the segmentations\n and the deblended images.\n '
measure_results = {}
for key in ['segmentation', 'deblended_images']:
try:
measure_results[key] = np.load(os.path.join(path, measure_name, f'{key}.npy'), allow_pickle=True)
except FileNotFoundError:
print(f'No {key} found.')
catalog = [Table.read(os.path.join(path, measure_name, f'detection_catalog_{j}'), format='ascii') for j in range(n_batch)]
measure_results['catalog'] = catalog
return measure_results
|
Load results exported from a MeasureGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the MeasureGenerator to save
the files.
measure_name (str): Name of the measure function for which you
want to load the files
n_batch (int): Number of blends in the batch you want to load
Returns:
Dictionary containing the detection catalogs, the segmentations
and the deblended images.
|
btk/utils.py
|
load_measure_results
|
b-biswas/BlendingToolKit
| 16 |
python
|
def load_measure_results(path, measure_name, n_batch):
'Load results exported from a MeasureGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MeasureGenerator to save\n the files.\n measure_name (str): Name of the measure function for which you\n want to load the files\n n_batch (int): Number of blends in the batch you want to load\n\n Returns:\n Dictionary containing the detection catalogs, the segmentations\n and the deblended images.\n '
measure_results = {}
for key in ['segmentation', 'deblended_images']:
try:
measure_results[key] = np.load(os.path.join(path, measure_name, f'{key}.npy'), allow_pickle=True)
except FileNotFoundError:
print(f'No {key} found.')
catalog = [Table.read(os.path.join(path, measure_name, f'detection_catalog_{j}'), format='ascii') for j in range(n_batch)]
measure_results['catalog'] = catalog
return measure_results
|
def load_measure_results(path, measure_name, n_batch):
'Load results exported from a MeasureGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MeasureGenerator to save\n the files.\n measure_name (str): Name of the measure function for which you\n want to load the files\n n_batch (int): Number of blends in the batch you want to load\n\n Returns:\n Dictionary containing the detection catalogs, the segmentations\n and the deblended images.\n '
measure_results = {}
for key in ['segmentation', 'deblended_images']:
try:
measure_results[key] = np.load(os.path.join(path, measure_name, f'{key}.npy'), allow_pickle=True)
except FileNotFoundError:
print(f'No {key} found.')
catalog = [Table.read(os.path.join(path, measure_name, f'detection_catalog_{j}'), format='ascii') for j in range(n_batch)]
measure_results['catalog'] = catalog
return measure_results<|docstring|>Load results exported from a MeasureGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the MeasureGenerator to save
the files.
measure_name (str): Name of the measure function for which you
want to load the files
n_batch (int): Number of blends in the batch you want to load
Returns:
Dictionary containing the detection catalogs, the segmentations
and the deblended images.<|endoftext|>
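The companion loader for measurement outputs, again with illustrative arguments; note that the 'segmentation' and 'deblended_images' entries are only present if the corresponding files were saved:

meas = load_measure_results('/tmp/btk_output', 'sep_measure', n_batch=8)  # assumed names
catalogs = meas['catalog']                 # always present
segmentations = meas.get('segmentation')   # None if no segmentation file was found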
|
7d08e6d3425eead9dc97e642a3b69b7184755c936562b96e618f085ec6fb73b3
|
def load_metrics_results(path, measure_name, survey_name):
'Load results exported from a MetricsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MetricsGenerator to save\n the files.\n measure_name (str): Name of the measure function for which you\n want to load the files\n survey_name (str): Name of the survey for which you want to load the files.\n\n Returns:\n Dictionary containing the detection, segmentation and reconstruction\n metrics, along with the galaxy summary table.\n '
metrics_results = {}
for key in ['detection', 'segmentation', 'reconstruction']:
try:
metrics_results[key] = np.load(os.path.join(path, measure_name, survey_name, f'{key}_metric.npy'), allow_pickle=True)
except FileNotFoundError:
print(f'No {key} metrics found.')
metrics_results['galaxy_summary'] = Table.read(os.path.join(path, measure_name, survey_name, 'galaxy_summary'), format='ascii')
return metrics_results
|
Load results exported from a MetricsGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the MetricsGenerator to save
the files.
measure_name (str): Name of the measure function for which you
want to load the files
survey_name (str): Name of the survey for which you want to load the files.
Returns:
Dictionary containing the detection, segmentation and reconstruction
metrics, along with the galaxy summary table.
|
btk/utils.py
|
load_metrics_results
|
b-biswas/BlendingToolKit
| 16 |
python
|
def load_metrics_results(path, measure_name, survey_name):
'Load results exported from a MetricsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MetricsGenerator to save\n the files.\n measure_name (str): Name of the measure function for which you\n want to load the files\n survey_name (str): Name of the survey for which you want to load the files.\n\n Returns:\n Dictionary containing the detection, segmentation and reconstruction\n metrics, along with the galaxy summary table.\n '
metrics_results = {}
for key in ['detection', 'segmentation', 'reconstruction']:
try:
metrics_results[key] = np.load(os.path.join(path, measure_name, survey_name, f'{key}_metric.npy'), allow_pickle=True)
except FileNotFoundError:
print(f'No {key} metrics found.')
metrics_results['galaxy_summary'] = Table.read(os.path.join(path, measure_name, survey_name, 'galaxy_summary'), format='ascii')
return metrics_results
|
def load_metrics_results(path, measure_name, survey_name):
'Load results exported from a MetricsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MetricsGenerator to save\n the files.\n measure_name (str): Name of the measure function for which you\n want to load the files\n survey_name (str): Name of the survey for which you want to load the files.\n\n Returns:\n Dictionary containing the detection, segmentation and reconstruction\n metrics, along with the galaxy summary table.\n '
metrics_results = {}
for key in ['detection', 'segmentation', 'reconstruction']:
try:
metrics_results[key] = np.load(os.path.join(path, measure_name, survey_name, f'{key}_metric.npy'), allow_pickle=True)
except FileNotFoundError:
print(f'No {key} metrics found.')
metrics_results['galaxy_summary'] = Table.read(os.path.join(path, measure_name, survey_name, 'galaxy_summary'), format='ascii')
return metrics_results<|docstring|>Load results exported from a MetricsGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the MetricsGenerator to save
the files.
measure_name (str): Name of the measure function for which you
want to load the files
survey_name (str): Name of the survey for which you want to load the files.
Returns:
Dictionary containing the detection, segmentation and reconstruction
metrics, along with the galaxy summary table.<|endoftext|>
|
36bf89fd923fd20ff5051bfac44ee7ba496d163224fa49ae917f53220cdd4d9f
|
def load_all_results(path, surveys, measure_names, n_batch, n_meas_kwargs=1):
'Load all results exported from the DrawBlendsGenerator, MeasureGenerator and MetricsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MetricsGenerator to save\n the files.\n surveys (list): Names of the surveys for which you want to load\n the files\n measure_names (list): Names of the measure functions for which you\n want to load the files\n n_batch (int): Number of blends in the batch you want to load\n n_meas_kwargs (int): Number of measure kwargs configurations used (default 1).\n\n Returns:\n The three dictionaries corresponding to the results.\n '
blend_results = {}
for key in BLEND_RESULT_KEYS:
blend_results[key] = {}
measure_results = {'catalog': {}, 'segmentation': {}, 'deblended_images': {}}
metrics_results = {'detection': {}, 'segmentation': {}, 'reconstruction': {}, 'galaxy_summary': {}}
for s in surveys:
blend_results_temp = load_blend_results(path, s)
for key in BLEND_RESULT_KEYS:
blend_results[key][s] = blend_results_temp[key]
for meas in measure_names:
for n in range(n_meas_kwargs):
dir_name = ((meas + str(n)) if (n_meas_kwargs > 1) else meas)
meas_results = load_measure_results(path, dir_name, n_batch)
for k in meas_results.keys():
measure_results[k][dir_name] = meas_results[k]
for k in metrics_results.keys():
metrics_results[k][dir_name] = {}
if (len(surveys) > 1):
for s in surveys:
metr_results = load_metrics_results(path, dir_name, s)
for k in metr_results.keys():
metrics_results[k][dir_name][s] = metr_results[k]
else:
metr_results = load_metrics_results(path, dir_name, surveys[0])
for k in metr_results.keys():
metrics_results[k][dir_name] = metr_results[k]
return (blend_results, measure_results, metrics_results)
|
Load all results exported from the DrawBlendsGenerator, MeasureGenerator and MetricsGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the MetricsGenerator to save
the files.
surveys (list): Names of the surveys for which you want to load
the files
measure_names (list): Names of the measure functions for which you
want to load the files
n_batch (int): Number of blends in the batch you want to load
n_meas_kwargs (int): Number of measure kwargs configurations used (default 1).
Returns:
The three dictionaries corresponding to the results.
|
btk/utils.py
|
load_all_results
|
b-biswas/BlendingToolKit
| 16 |
python
|
def load_all_results(path, surveys, measure_names, n_batch, n_meas_kwargs=1):
'Load all results exported from the DrawBlendsGenerator, MeasureGenerator and MetricsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MetricsGenerator to save\n the files.\n surveys (list): Names of the surveys for which you want to load\n the files\n measure_names (list): Names of the measure functions for which you\n want to load the files\n n_batch (int): Number of blends in the batch you want to load\n n_meas_kwargs (int): Number of measure kwargs configurations used (default 1).\n\n Returns:\n The three dictionaries corresponding to the results.\n '
blend_results = {}
for key in BLEND_RESULT_KEYS:
blend_results[key] = {}
measure_results = {'catalog': {}, 'segmentation': {}, 'deblended_images': {}}
metrics_results = {'detection': {}, 'segmentation': {}, 'reconstruction': {}, 'galaxy_summary': {}}
for s in surveys:
blend_results_temp = load_blend_results(path, s)
for key in BLEND_RESULT_KEYS:
blend_results[key][s] = blend_results_temp[key]
for meas in measure_names:
for n in range(n_meas_kwargs):
dir_name = ((meas + str(n)) if (n_meas_kwargs > 1) else meas)
meas_results = load_measure_results(path, dir_name, n_batch)
for k in meas_results.keys():
measure_results[k][dir_name] = meas_results[k]
for k in metrics_results.keys():
metrics_results[k][dir_name] = {}
if (len(surveys) > 1):
for s in surveys:
metr_results = load_metrics_results(path, dir_name, s)
for k in metr_results.keys():
metrics_results[k][dir_name][s] = metr_results[k]
else:
metr_results = load_metrics_results(path, dir_name, surveys[0])
for k in metr_results.keys():
metrics_results[k][dir_name] = metr_results[k]
return (blend_results, measure_results, metrics_results)
|
def load_all_results(path, surveys, measure_names, n_batch, n_meas_kwargs=1):
'Load all results exported from the DrawBlendsGenerator, MeasureGenerator and MetricsGenerator.\n\n Args:\n path (str): Path to the files. Should be the same as the save_path\n which was provided to the MetricsGenerator to save\n the files.\n surveys (list): Names of the surveys for which you want to load\n the files\n measure_names (list): Names of the measure functions for which you\n want to load the files\n n_batch (int): Number of blends in the batch you want to load\n n_meas_kwargs (int): Number of measure kwargs configurations used (default 1).\n\n Returns:\n The three dictionaries corresponding to the results.\n '
blend_results = {}
for key in BLEND_RESULT_KEYS:
blend_results[key] = {}
measure_results = {'catalog': {}, 'segmentation': {}, 'deblended_images': {}}
metrics_results = {'detection': {}, 'segmentation': {}, 'reconstruction': {}, 'galaxy_summary': {}}
for s in surveys:
blend_results_temp = load_blend_results(path, s)
for key in BLEND_RESULT_KEYS:
blend_results[key][s] = blend_results_temp[key]
for meas in measure_names:
for n in range(n_meas_kwargs):
dir_name = ((meas + str(n)) if (n_meas_kwargs > 1) else meas)
meas_results = load_measure_results(path, dir_name, n_batch)
for k in meas_results.keys():
measure_results[k][dir_name] = meas_results[k]
for k in metrics_results.keys():
metrics_results[k][dir_name] = {}
if (len(surveys) > 1):
for s in surveys:
metr_results = load_metrics_results(path, dir_name, s)
for k in metr_results.keys():
metrics_results[k][dir_name][s] = metr_results[k]
else:
metr_results = load_metrics_results(path, dir_name, surveys[0])
for k in metr_results.keys():
metrics_results[k][dir_name] = metr_results[k]
return (blend_results, measure_results, metrics_results)<|docstring|>Load all results exported from the DrawBlendsGenerator, MeasureGenerator and MetricsGenerator.
Args:
path (str): Path to the files. Should be the same as the save_path
which was provided to the MetricsGenerator to save
the files.
surveys (list): Names of the surveys for which you want to load
the files
measure_names (list): Names of the measure functions for which you
want to load the files
n_batch (int): Number of blends in the batch you want to load
n_meas_kwargs (int): Number of measure kwargs configurations used (default 1).
Returns:
The three dictionaries corresponding to the results.<|endoftext|>
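Tying the loaders together; a sketch assuming the directory layout produced by the save_path machinery described above:

blend_res, meas_res, metr_res = load_all_results(
    '/tmp/btk_output',              # same save_path used when generating
    surveys=['LSST'],               # illustrative survey list
    measure_names=['sep_measure'],  # illustrative measure function name
    n_batch=8,
)
print(metr_res['detection']['sep_measure'])  # detection metrics (single-survey case)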
|
457bace977144cccf9cb074dc1e5f96fc5d29f189a1dd10229fb3128a628e9cc
|
def reverse_list_dictionary(to_reverse, keys):
'Transforms a list of dictionaries into a dictionary of lists.\n\n Additionally, if the initial list contains None instead of dictionaries,\n the dictionary will contain lists of None.\n Mainly used in the measure.py file.\n\n Args:\n to_reverse (list): List to reverse, should contain dictionaries (or None)\n keys (list): Keys of the dictionaries inside the list.\n\n Returns:\n Dictionary.\n '
if (to_reverse[0] is None):
to_reverse = {k: [None for _ in range(len(to_reverse))] for k in keys}
else:
to_reverse = {k: [to_reverse[n][k] for n in range(len(to_reverse))] for k in keys}
return to_reverse
|
Transforms a list of dictionaries into a dictionary of lists.
Additionally, if the initial list contains None instead of dictionaries,
the dictionary will contain lists of None.
Mainly used in the measure.py file.
Args:
to_reverse (list): List to reverse, should contain dictionaries (or None)
keys (list): Keys of the dictionaries inside the list.
Returns:
Dictionary.
|
btk/utils.py
|
reverse_list_dictionary
|
b-biswas/BlendingToolKit
| 16 |
python
|
def reverse_list_dictionary(to_reverse, keys):
'Transforms a list of dictionaries into a dictionary of lists.\n\n Additionally, if the initial list contains None instead of dictionaries,\n the dictionary will contain lists of None.\n Mainly used in the measure.py file.\n\n Args:\n to_reverse (list): List to reverse, should contain dictionaries (or None)\n keys (list): Keys of the dictionaries inside the list.\n\n Returns:\n Dictionary.\n '
if (to_reverse[0] is None):
to_reverse = {k: [None for _ in range(len(to_reverse))] for k in keys}
else:
to_reverse = {k: [to_reverse[n][k] for n in range(len(to_reverse))] for k in keys}
return to_reverse
|
def reverse_list_dictionary(to_reverse, keys):
'Transforms a list of dictionaries into a dictionary of lists.\n\n Additionally, if the initial list contains None instead of dictionaries,\n the dictionary will contain lists of None.\n Mainly used in the measure.py file.\n\n Args:\n to_reverse (list): List to reverse, should contain dictionaries (or None)\n keys (list): Keys of the dictionaries inside the list.\n\n Returns:\n Dictionary.\n '
if (to_reverse[0] is None):
to_reverse = {k: [None for _ in range(len(to_reverse))] for k in keys}
else:
to_reverse = {k: [to_reverse[n][k] for n in range(len(to_reverse))] for k in keys}
return to_reverse<|docstring|>Transforms a list of dictionaries into a dictionary of lists.
Additionally, if the initial list contains None instead of dictionaries,
the dictionary will contain lists of None.
Mainly used in the measure.py file.
Args:
to_reverse (list): List to reverse, should contain dictionaries (or None)
keys (list): Keys of the dictionaries inside the list.
Returns:
Dictionary.<|endoftext|>
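A tiny worked example of the reversal:

rows = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
reverse_list_dictionary(rows, keys=['a', 'b'])
# -> {'a': [1, 3], 'b': [2, 4]}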
|
8c7311774d3e73cd98334b56f6f6ff046a03f902e16f4b465f7db67191c8aac0
|
def reverse_dictionary_dictionary(to_reverse):
'Exchanges two dictionary layers.\n\n For instance, dic[keyA][key1] will become dic[key1][keyA].\n\n Args:\n to_reverse (dict): Dictionary of dictionaries.\n\n Returns:\n Reversed dictionary.\n '
first_keys = list(to_reverse.keys())
second_keys = list(to_reverse[first_keys[0]].keys())
return {s_key: {f_key: to_reverse[f_key][s_key] for f_key in first_keys} for s_key in second_keys}
|
Exchanges two dictionary layers.
For instance, dic[keyA][key1] will become dic[key1][keyA].
Args:
to_reverse (dict): Dictionary of dictionaries.
Returns:
Reversed dictionary.
|
btk/utils.py
|
reverse_dictionary_dictionary
|
b-biswas/BlendingToolKit
| 16 |
python
|
def reverse_dictionary_dictionary(to_reverse):
'Exchanges two dictionary layers.\n\n For instance, dic[keyA][key1] will become dic[key1][keyA].\n\n Args:\n to_reverse (dict): Dictionary of dictionaries.\n\n Returns:\n Reversed dictionary.\n '
first_keys = list(to_reverse.keys())
second_keys = list(to_reverse[first_keys[0]].keys())
return {s_key: {f_key: to_reverse[f_key][s_key] for f_key in first_keys} for s_key in second_keys}
|
def reverse_dictionary_dictionary(to_reverse):
'Exchanges two dictionary layers.\n\n For instance, dic[keyA][key1] will become dic[key1][keyA].\n\n Args:\n to_reverse (dict): Dictionary of dictionaries.\n\n Returns:\n Reversed dictionary.\n '
first_keys = list(to_reverse.keys())
second_keys = list(to_reverse[first_keys[0]].keys())
return {s_key: {f_key: to_reverse[f_key][s_key] for f_key in first_keys} for s_key in second_keys}<|docstring|>Exchanges two dictionary layers.
For instance, dic[keyA][key1] will become dic[key1][keyA].
Args:
to_reverse (dict): Dictionary of dictionaries.
Returns:
Reversed dictionary.<|endoftext|>
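And the two-layer swap:

d = {'keyA': {'key1': 10, 'key2': 20}, 'keyB': {'key1': 30, 'key2': 40}}
reverse_dictionary_dictionary(d)
# -> {'key1': {'keyA': 10, 'keyB': 30}, 'key2': {'keyA': 20, 'keyB': 40}}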
|
c0954b0c80e4c94113e696e94f51c709ef9e1c5ef6820158e4d71a948c1d97b5
|
def __init__(self, domain, discretization=20, seed=1):
'\n :param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn\n :param discretization: Number of bins used for each continuous dimension.\n For discrete dimensions, this parameter is ignored.\n '
for v in ['features_num']:
if (getattr(self, v) is None):
raise Exception(('Missed domain initialization of ' + v))
self.expectedStepCached = {}
self.setBinsPerDimension(domain, discretization)
self.domain = domain
self.state_space_dims = domain.state_space_dims
self.actions_num = domain.actions_num
self.discretization = discretization
try:
self.weight_vec = np.zeros((self.features_num * self.actions_num))
except MemoryError as m:
print('Unable to allocate weights of size: %d\n' % (self.features_num * self.actions_num))
raise m
self._phi_sa_cache = np.empty((self.actions_num, self.features_num))
self._arange_cache = np.arange(self.features_num)
self.agg_states_num = np.prod(self.bins_per_dim.astype('uint64'))
self.logger = logging.getLogger(('rlpy.Representations.' + self.__class__.__name__))
self.random_state = np.random.RandomState(seed=seed)
|
:param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn
:param discretization: Number of bins used for each continuous dimension.
For discrete dimensions, this parameter is ignored.
|
rlpy/Representations/Representation.py
|
__init__
|
okkhoy/rlpy
| 265 |
python
|
def __init__(self, domain, discretization=20, seed=1):
'\n :param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn\n :param discretization: Number of bins used for each continuous dimension.\n For discrete dimensions, this parameter is ignored.\n '
for v in ['features_num']:
if (getattr(self, v) is None):
raise Exception(('Missed domain initialization of ' + v))
self.expectedStepCached = {}
self.setBinsPerDimension(domain, discretization)
self.domain = domain
self.state_space_dims = domain.state_space_dims
self.actions_num = domain.actions_num
self.discretization = discretization
try:
self.weight_vec = np.zeros((self.features_num * self.actions_num))
except MemoryError as m:
print('Unable to allocate weights of size: %d\n' % (self.features_num * self.actions_num))
raise m
self._phi_sa_cache = np.empty((self.actions_num, self.features_num))
self._arange_cache = np.arange(self.features_num)
self.agg_states_num = np.prod(self.bins_per_dim.astype('uint64'))
self.logger = logging.getLogger(('rlpy.Representations.' + self.__class__.__name__))
self.random_state = np.random.RandomState(seed=seed)
|
def __init__(self, domain, discretization=20, seed=1):
'\n :param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn\n :param discretization: Number of bins used for each continuous dimension.\n For discrete dimensions, this parameter is ignored.\n '
for v in ['features_num']:
if (getattr(self, v) is None):
raise Exception(('Missed domain initialization of ' + v))
self.expectedStepCached = {}
self.setBinsPerDimension(domain, discretization)
self.domain = domain
self.state_space_dims = domain.state_space_dims
self.actions_num = domain.actions_num
self.discretization = discretization
try:
self.weight_vec = np.zeros((self.features_num * self.actions_num))
except MemoryError as m:
print('Unable to allocate weights of size: %d\n' % (self.features_num * self.actions_num))
raise m
self._phi_sa_cache = np.empty((self.actions_num, self.features_num))
self._arange_cache = np.arange(self.features_num)
self.agg_states_num = np.prod(self.bins_per_dim.astype('uint64'))
self.logger = logging.getLogger(('rlpy.Representations.' + self.__class__.__name__))
self.random_state = np.random.RandomState(seed=seed)<|docstring|>:param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn
:param discretization: Number of bins used for each continuous dimension.
For discrete dimensions, this parameter is ignored.<|endoftext|>
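The constructor above requires a subclass to have set features_num beforehand; a minimal, hypothetical subclass sketch (TabularLike is not an rlpy class, just an illustration):

import numpy as np

class TabularLike(Representation):
    def __init__(self, domain, discretization=20, seed=1):
        self.features_num = 100                 # must be set BEFORE super().__init__
        super().__init__(domain, discretization, seed)

    def phi_nonTerminal(self, s):
        phi = np.zeros(self.features_num, 'bool')
        phi[hash(tuple(s)) % self.features_num] = 1  # toy feature map
        return phi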
|
4902058c6089f06ccb69fb525ecd002903c1f352084a5568a63fe13ac68d4a4d
|
def init_randomization(self):
'\n Any stochastic behavior in __init__() is broken out into this function\n so that if the random seed is later changed (e.g., by the Experiment),\n other member variables and functions are updated accordingly.\n \n '
pass
|
Any stochastic behavior in __init__() is broken out into this function
so that if the random seed is later changed (e.g., by the Experiment),
other member variables and functions are updated accordingly.
|
rlpy/Representations/Representation.py
|
init_randomization
|
okkhoy/rlpy
| 265 |
python
|
def init_randomization(self):
'\n Any stochastic behavior in __init__() is broken out into this function\n so that if the random seed is later changed (e.g., by the Experiment),\n other member variables and functions are updated accordingly.\n \n '
pass
|
def init_randomization(self):
'\n Any stochastic behavior in __init__() is broken out into this function\n so that if the random seed is later changed (e.g., by the Experiment),\n other member variables and functions are updated accordingly.\n \n '
pass<|docstring|>Any stochastic behavior in __init__() is broken out into this function
so that if the random seed is later changed (e.g., by the Experiment),
other member variables and functions are updated accordingly.<|endoftext|>
|
0db76094d7bf6454f38d112effaaf0aded30a20ae034cc48fc8308968c7de39a
|
def V(self, s, terminal, p_actions, phi_s=None):
' Returns the value of state s under possible actions p_actions.\n\n :param s: The queried state\n :param terminal: Whether or not *s* is a terminal state\n :param p_actions: the set of possible actions\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.Qs`.\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
AllQs = self.Qs(s, terminal, phi_s)
if len(p_actions):
return max(AllQs[p_actions])
else:
return 0
|
Returns the value of state s under possible actions p_actions.
:param s: The queried state
:param terminal: Whether or not *s* is a terminal state
:param p_actions: the set of possible actions
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
See :py:meth:`~rlpy.Representations.Representation.Representation.Qs`.
|
rlpy/Representations/Representation.py
|
V
|
okkhoy/rlpy
| 265 |
python
|
def V(self, s, terminal, p_actions, phi_s=None):
' Returns the value of state s under possible actions p_actions.\n\n :param s: The queried state\n :param terminal: Whether or not *s* is a terminal state\n :param p_actions: the set of possible actions\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.Qs`.\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
AllQs = self.Qs(s, terminal, phi_s)
if len(p_actions):
return max(AllQs[p_actions])
else:
return 0
|
def V(self, s, terminal, p_actions, phi_s=None):
' Returns the value of state s under possible actions p_actions.\n\n :param s: The queried state\n :param terminal: Whether or not *s* is a terminal state\n :param p_actions: the set of possible actions\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.Qs`.\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
AllQs = self.Qs(s, terminal, phi_s)
if len(p_actions):
return max(AllQs[p_actions])
else:
return 0<|docstring|>Returns the value of state s under possible actions p_actions.
:param s: The queried state
:param terminal: Whether or not *s* is a terminal state
:param p_actions: the set of possible actions
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
See :py:meth:`~rlpy.Representations.Representation.Representation.Qs`.<|endoftext|>
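The key detail in V() is that the max runs only over the *possible* actions; a self-contained illustration with stand-in numbers:

import numpy as np

all_qs = np.array([0.5, -1.0, 2.0, 0.1])  # stand-in output of Qs(s, terminal)
p_actions = [0, 3]                         # only actions 0 and 3 are possible here
v = max(all_qs[p_actions]) if len(p_actions) else 0
# v == 0.5, even though the global best (2.0) sits at the impossible action 2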
|
096ef6e2d64e35240b084f68ab720fcee4e659e4c9601f0befba943f4e2adc2f
|
def Qs(self, s, terminal, phi_s=None):
'\n Returns an array of actions available at a state and their\n associated values.\n\n :param s: The queried state\n :param terminal: Whether or not *s* is a terminal state\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n :return: The tuple (Q,A) where:\n - Q: an array of Q(s,a), the values of each action at *s*. \n\n - A: the corresponding array of actionIDs (integers)\n\n .. note::\n This function is distinct\n from :py:meth:`~rlpy.Representations.Representation.Representation.Q`,\n which computes the Q function for an (s,a) pair. \n\n Instead, this function ``Qs()`` computes all Q function values\n (for all possible actions) at a given state *s*.\n\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
if (len(phi_s) == 0):
return np.zeros(self.actions_num)
weight_vec_prime = self.weight_vec.reshape((- 1), self.features_num)
if (self._phi_sa_cache.shape != (self.actions_num, self.features_num)):
self._phi_sa_cache = np.empty((self.actions_num, self.features_num))
Q = np.multiply(weight_vec_prime, phi_s, out=self._phi_sa_cache).sum(axis=1)
return Q
|
Returns an array of actions available at a state and their
associated values.
:param s: The queried state
:param terminal: Whether or not *s* is a terminal state
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:return: an array of Q(s,a), the value of each action at *s*, indexed
by actionID.
.. note::
This function is distinct
from :py:meth:`~rlpy.Representations.Representation.Representation.Q`,
which computes the Q function for an (s,a) pair.
Instead, this function ``Qs()`` computes all Q function values
(for all possible actions) at a given state *s*.
|
rlpy/Representations/Representation.py
|
Qs
|
okkhoy/rlpy
| 265 |
python
|
def Qs(self, s, terminal, phi_s=None):
'\n Returns an array of actions available at a state and their\n associated values.\n\n :param s: The queried state\n :param terminal: Whether or not *s* is a terminal state\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n :return: The tuple (Q,A) where:\n - Q: an array of Q(s,a), the values of each action at *s*. \n\n - A: the corresponding array of actionIDs (integers)\n\n .. note::\n This function is distinct\n from :py:meth:`~rlpy.Representations.Representation.Representation.Q`,\n which computes the Q function for an (s,a) pair. \n\n Instead, this function ``Qs()`` computes all Q function values\n (for all possible actions) at a given state *s*.\n\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
if (len(phi_s) == 0):
return np.zeros(self.actions_num)
weight_vec_prime = self.weight_vec.reshape((- 1), self.features_num)
if (self._phi_sa_cache.shape != (self.actions_num, self.features_num)):
self._phi_sa_cache = np.empty((self.actions_num, self.features_num))
Q = np.multiply(weight_vec_prime, phi_s, out=self._phi_sa_cache).sum(axis=1)
return Q
|
def Qs(self, s, terminal, phi_s=None):
'\n Returns an array of actions available at a state and their\n associated values.\n\n :param s: The queried state\n :param terminal: Whether or not *s* is a terminal state\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n :return: The tuple (Q,A) where:\n - Q: an array of Q(s,a), the values of each action at *s*. \n\n - A: the corresponding array of actionIDs (integers)\n\n .. note::\n This function is distinct\n from :py:meth:`~rlpy.Representations.Representation.Representation.Q`,\n which computes the Q function for an (s,a) pair. \n\n Instead, this function ``Qs()`` computes all Q function values\n (for all possible actions) at a given state *s*.\n\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
if (len(phi_s) == 0):
return np.zeros(self.actions_num)
weight_vec_prime = self.weight_vec.reshape((- 1), self.features_num)
if (self._phi_sa_cache.shape != (self.actions_num, self.features_num)):
self._phi_sa_cache = np.empty((self.actions_num, self.features_num))
Q = np.multiply(weight_vec_prime, phi_s, out=self._phi_sa_cache).sum(axis=1)
return Q<|docstring|>Returns an array of actions available at a state and their
associated values.
:param s: The queried state
:param terminal: Whether or not *s* is a terminal state
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:return: an array of Q(s,a), the value of each action at *s*, indexed
by actionID.
.. note::
This function is distinct
from :py:meth:`~rlpy.Representations.Representation.Representation.Q`,
which computes the Q function for an (s,a) pair.
Instead, this function ``Qs()`` computes all Q function values
(for all possible actions) at a given state *s*.<|endoftext|>
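The weight-reshape trick inside Qs() can be shown standalone; all numbers here are arbitrary stand-ins:

import numpy as np

features_num, actions_num = 4, 3
weight_vec = np.arange(features_num * actions_num, dtype=float)  # stand-in weights
phi_s = np.array([1.0, 0.0, 1.0, 0.0])                           # stand-in features

W = weight_vec.reshape(-1, features_num)  # one weight row per action
Q = (W * phi_s).sum(axis=1)               # Q(s, a) for every action at once
# Q[a] == np.dot(W[a], phi_s) for each actionID a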
|
5b1ee7b741842444c5dc182bc5b169095bf3f0cf28d0828d5ae76608c69da8f7
|
def Q(self, s, terminal, a, phi_s=None):
' Returns the learned value of a state-action pair, *Q(s,a)*.\n\n :param s: The queried state in the state-action pair.\n :param terminal: Whether or not *s* is a terminal state\n :param a: The queried action in the state-action pair.\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n :return: (float) the value of the state-action pair (s,a), Q(s,a).\n\n '
if (len(self.weight_vec) > 0):
(phi_sa, i, j) = self.phi_sa(s, terminal, a, phi_s, snippet=True)
return np.dot(phi_sa, self.weight_vec[i:j])
else:
return 0.0
|
Returns the learned value of a state-action pair, *Q(s,a)*.
:param s: The queried state in the state-action pair.
:param terminal: Whether or not *s* is a terminal state
:param a: The queried action in the state-action pair.
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:return: (float) the value of the state-action pair (s,a), Q(s,a).
|
rlpy/Representations/Representation.py
|
Q
|
okkhoy/rlpy
| 265 |
python
|
def Q(self, s, terminal, a, phi_s=None):
' Returns the learned value of a state-action pair, *Q(s,a)*.\n\n :param s: The queried state in the state-action pair.\n :param terminal: Whether or not *s* is a terminal state\n :param a: The queried action in the state-action pair.\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n :return: (float) the value of the state-action pair (s,a), Q(s,a).\n\n '
if (len(self.weight_vec) > 0):
(phi_sa, i, j) = self.phi_sa(s, terminal, a, phi_s, snippet=True)
return np.dot(phi_sa, self.weight_vec[i:j])
else:
return 0.0
|
def Q(self, s, terminal, a, phi_s=None):
' Returns the learned value of a state-action pair, *Q(s,a)*.\n\n :param s: The queried state in the state-action pair.\n :param terminal: Whether or not *s* is a terminal state\n :param a: The queried action in the state-action pair.\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n\n :return: (float) the value of the state-action pair (s,a), Q(s,a).\n\n '
if (len(self.weight_vec) > 0):
(phi_sa, i, j) = self.phi_sa(s, terminal, a, phi_s, snippet=True)
return np.dot(phi_sa, self.weight_vec[i:j])
else:
return 0.0<|docstring|>Returns the learned value of a state-action pair, *Q(s,a)*.
:param s: The queried state in the state-action pair.
:param terminal: Whether or not *s* is a terminal state
:param a: The queried action in the state-action pair.
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:return: (float) the value of the state-action pair (s,a), Q(s,a).<|endoftext|>
|
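The ``snippet=True`` path of phi_sa lets Q avoid building the full sparse vector: it dots phi(s) with the weight block of action a. A standalone sketch with illustrative sizes:

import numpy as np

features_num, actions_num = 4, 3
weight_vec = np.arange(features_num * actions_num, dtype=float)
phi_s = np.array([1.0, 0.0, 0.5, 2.0])
a = 1  # queried action

# Indices (i, j) delimit the block of weights belonging to action a.
i, j = a * features_num, (a + 1) * features_num
q_sa = np.dot(phi_s, weight_vec[i:j])
print(q_sa)  # Q(s, a) without ever materialising phi_sa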
1c9112b0bffeb2cfef5cfdf0e85834c19771496a63ea653b22ef6f4685a0b2dd
|
def phi(self, s, terminal):
'\n Returns :py:meth:`~rlpy.Representations.Representation.Representation.phi_nonTerminal`\n for a given representation, or a zero feature vector in a terminal state.\n\n :param s: The state for which to compute the feature vector\n\n :return: numpy array, the feature vector evaluted at state *s*.\n\n .. note::\n If state *s* is terminal the feature vector is returned as zeros!\n This prevents the learning algorithm from wrongfully associating\n the end of one episode with the start of the next (e.g., thinking\n that reaching the terminal state causes it to teleport back to the\n start state s0).\n\n\n '
if (terminal or (self.features_num == 0)):
return np.zeros(self.features_num, 'bool')
else:
return self.phi_nonTerminal(s)
|
Returns :py:meth:`~rlpy.Representations.Representation.Representation.phi_nonTerminal`
for a given representation, or a zero feature vector in a terminal state.
:param s: The state for which to compute the feature vector
:return: numpy array, the feature vector evaluated at state *s*.
.. note::
If state *s* is terminal the feature vector is returned as zeros!
This prevents the learning algorithm from wrongfully associating
the end of one episode with the start of the next (e.g., thinking
that reaching the terminal state causes it to teleport back to the
start state s0).
|
rlpy/Representations/Representation.py
|
phi
|
okkhoy/rlpy
| 265 |
python
|
def phi(self, s, terminal):
'\n Returns :py:meth:`~rlpy.Representations.Representation.Representation.phi_nonTerminal`\n for a given representation, or a zero feature vector in a terminal state.\n\n :param s: The state for which to compute the feature vector\n\n :return: numpy array, the feature vector evaluted at state *s*.\n\n .. note::\n If state *s* is terminal the feature vector is returned as zeros!\n This prevents the learning algorithm from wrongfully associating\n the end of one episode with the start of the next (e.g., thinking\n that reaching the terminal state causes it to teleport back to the\n start state s0).\n\n\n '
if (terminal or (self.features_num == 0)):
return np.zeros(self.features_num, 'bool')
else:
return self.phi_nonTerminal(s)
|
def phi(self, s, terminal):
'\n Returns :py:meth:`~rlpy.Representations.Representation.Representation.phi_nonTerminal`\n for a given representation, or a zero feature vector in a terminal state.\n\n :param s: The state for which to compute the feature vector\n\n :return: numpy array, the feature vector evaluted at state *s*.\n\n .. note::\n If state *s* is terminal the feature vector is returned as zeros!\n This prevents the learning algorithm from wrongfully associating\n the end of one episode with the start of the next (e.g., thinking\n that reaching the terminal state causes it to teleport back to the\n start state s0).\n\n\n '
if (terminal or (self.features_num == 0)):
return np.zeros(self.features_num, 'bool')
else:
return self.phi_nonTerminal(s)<|docstring|>Returns :py:meth:`~rlpy.Representations.Representation.Representation.phi_nonTerminal`
for a given representation, or a zero feature vector in a terminal state.
:param s: The state for which to compute the feature vector
:return: numpy array, the feature vector evaluted at state *s*.
.. note::
If state *s* is terminal the feature vector is returned as zeros!
This prevents the learning algorithm from wrongfully associating
the end of one episode with the start of the next (e.g., thinking
that reaching the terminal state causes it to teleport back to the
start state s0).<|endoftext|>
|
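A self-contained sketch of the terminal-state convention documented above: terminal states map to the zero feature vector so that no value leaks across episode boundaries. The one-hot feature map is a hypothetical stand-in for phi_nonTerminal.

import numpy as np

def phi(phi_nonterminal, s, terminal, features_num):
    # Terminal states yield all zeros, mirroring Representation.phi.
    if terminal or features_num == 0:
        return np.zeros(features_num, 'bool')
    return phi_nonterminal(s)

one_hot = lambda s: np.eye(5, dtype=bool)[s]  # hypothetical feature map
print(phi(one_hot, 2, False, 5))  # [False False  True False False]
print(phi(one_hot, 2, True, 5))   # all zeros at a terminal state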
0c449f7e19839a7524792004406cd04a0b6af864c70efcd2890e173927d32de5
|
def phi_sa(self, s, terminal, a, phi_s=None, snippet=False):
'\n Returns the feature vector corresponding to a state-action pair.\n We use the copy paste technique (Lagoudakis & Parr 2003).\n Essentially, we append the phi(s) vector to itself *|A|* times, where\n *|A|* is the size of the action space.\n We zero the feature values of all of these blocks except the one\n corresponding to the actionID *a*.\n\n When ``snippet == False`` we construct and return the full, sparse phi_sa.\n When ``snippet == True``, we return the tuple (phi_s, index1, index2)\n where index1 and index2 are the indices defining the ends of the phi_s\n block which WOULD be nonzero if we were to construct the full phi_sa.\n\n :param s: The queried state in the state-action pair.\n :param terminal: Whether or not *s* is a terminal state\n :param a: The queried action in the state-action pair.\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n :param snippet: if ``True``, do not return a single phi_sa vector,\n but instead a tuple of the components needed to create it.\n See return value below.\n\n :return: If ``snippet==False``, return the enormous phi_sa vector\n constructed by the copy-paste method.\n If ``snippet==True``, do not construct phi_sa, only return\n a tuple (phi_s, index1, index2) as described above.\n\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
if (snippet is True):
return (phi_s, (a * self.features_num), ((a + 1) * self.features_num))
phi_sa = np.zeros((self.features_num * self.actions_num), dtype=phi_s.dtype)
if (self.features_num == 0):
return phi_sa
if (len(self._arange_cache) != self.features_num):
self._arange_cache = np.arange((a * self.features_num), ((a + 1) * self.features_num))
else:
self._arange_cache += ((a * self.features_num) - self._arange_cache[0])
phi_sa[self._arange_cache] = phi_s
return phi_sa
|
Returns the feature vector corresponding to a state-action pair.
We use the copy paste technique (Lagoudakis & Parr 2003).
Essentially, we append the phi(s) vector to itself *|A|* times, where
*|A|* is the size of the action space.
We zero the feature values of all of these blocks except the one
corresponding to the actionID *a*.
When ``snippet == False`` we construct and return the full, sparse phi_sa.
When ``snippet == True``, we return the tuple (phi_s, index1, index2)
where index1 and index2 are the indices defining the ends of the phi_s
block which WOULD be nonzero if we were to construct the full phi_sa.
:param s: The queried state in the state-action pair.
:param terminal: Whether or not *s* is a terminal state
:param a: The queried action in the state-action pair.
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:param snippet: if ``True``, do not return a single phi_sa vector,
but instead a tuple of the components needed to create it.
See return value below.
:return: If ``snippet==False``, return the enormous phi_sa vector
constructed by the copy-paste method.
If ``snippet==True``, do not construct phi_sa, only return
a tuple (phi_s, index1, index2) as described above.
|
rlpy/Representations/Representation.py
|
phi_sa
|
okkhoy/rlpy
| 265 |
python
|
def phi_sa(self, s, terminal, a, phi_s=None, snippet=False):
'\n Returns the feature vector corresponding to a state-action pair.\n We use the copy paste technique (Lagoudakis & Parr 2003).\n Essentially, we append the phi(s) vector to itself *|A|* times, where\n *|A|* is the size of the action space.\n We zero the feature values of all of these blocks except the one\n corresponding to the actionID *a*.\n\n When ``snippet == False`` we construct and return the full, sparse phi_sa.\n When ``snippet == True``, we return the tuple (phi_s, index1, index2)\n where index1 and index2 are the indices defining the ends of the phi_s\n block which WOULD be nonzero if we were to construct the full phi_sa.\n\n :param s: The queried state in the state-action pair.\n :param terminal: Whether or not *s* is a terminal state\n :param a: The queried action in the state-action pair.\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n :param snippet: if ``True``, do not return a single phi_sa vector,\n but instead a tuple of the components needed to create it.\n See return value below.\n\n :return: If ``snippet==False``, return the enormous phi_sa vector\n constructed by the copy-paste method.\n If ``snippet==True``, do not construct phi_sa, only return\n a tuple (phi_s, index1, index2) as described above.\n\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
if (snippet is True):
return (phi_s, (a * self.features_num), ((a + 1) * self.features_num))
phi_sa = np.zeros((self.features_num * self.actions_num), dtype=phi_s.dtype)
if (self.features_num == 0):
return phi_sa
if (len(self._arange_cache) != self.features_num):
self._arange_cache = np.arange((a * self.features_num), ((a + 1) * self.features_num))
else:
self._arange_cache += ((a * self.features_num) - self._arange_cache[0])
phi_sa[self._arange_cache] = phi_s
return phi_sa
|
def phi_sa(self, s, terminal, a, phi_s=None, snippet=False):
'\n Returns the feature vector corresponding to a state-action pair.\n We use the copy paste technique (Lagoudakis & Parr 2003).\n Essentially, we append the phi(s) vector to itself *|A|* times, where\n *|A|* is the size of the action space.\n We zero the feature values of all of these blocks except the one\n corresponding to the actionID *a*.\n\n When ``snippet == False`` we construct and return the full, sparse phi_sa.\n When ``snippet == True``, we return the tuple (phi_s, index1, index2)\n where index1 and index2 are the indices defining the ends of the phi_s\n block which WOULD be nonzero if we were to construct the full phi_sa.\n\n :param s: The queried state in the state-action pair.\n :param terminal: Whether or not *s* is a terminal state\n :param a: The queried action in the state-action pair.\n :param phi_s: (optional) The feature vector evaluated at state s.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n :param snippet: if ``True``, do not return a single phi_sa vector,\n but instead a tuple of the components needed to create it.\n See return value below.\n\n :return: If ``snippet==False``, return the enormous phi_sa vector\n constructed by the copy-paste method.\n If ``snippet==True``, do not construct phi_sa, only return\n a tuple (phi_s, index1, index2) as described above.\n\n '
if (phi_s is None):
phi_s = self.phi(s, terminal)
if (snippet is True):
return (phi_s, (a * self.features_num), ((a + 1) * self.features_num))
phi_sa = np.zeros((self.features_num * self.actions_num), dtype=phi_s.dtype)
if (self.features_num == 0):
return phi_sa
if (len(self._arange_cache) != self.features_num):
self._arange_cache = np.arange((a * self.features_num), ((a + 1) * self.features_num))
else:
self._arange_cache += ((a * self.features_num) - self._arange_cache[0])
phi_sa[self._arange_cache] = phi_s
return phi_sa<|docstring|>Returns the feature vector corresponding to a state-action pair.
We use the copy paste technique (Lagoudakis & Parr 2003).
Essentially, we append the phi(s) vector to itself *|A|* times, where
*|A|* is the size of the action space.
We zero the feature values of all of these blocks except the one
corresponding to the actionID *a*.
When ``snippet == False`` we construct and return the full, sparse phi_sa.
When ``snippet == True``, we return the tuple (phi_s, index1, index2)
where index1 and index2 are the indices defining the ends of the phi_s
block which WOULD be nonzero if we were to construct the full phi_sa.
:param s: The queried state in the state-action pair.
:param terminal: Whether or not *s* is a terminal state
:param a: The queried action in the state-action pair.
:param phi_s: (optional) The feature vector evaluated at state s.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:param snippet: if ``True``, do not return a single phi_sa vector,
but instead a tuple of the components needed to create it.
See return value below.
:return: If ``snippet==False``, return the enormous phi_sa vector
constructed by the copy-paste method.
If ``snippet==True``, do not construct phi_sa, only return
a tuple (phi_s, index1, index2) as described above.<|endoftext|>
|
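The copy-paste construction in isolation: phi_sa is zero everywhere except the block owned by the chosen action. A small sketch with illustrative sizes:

import numpy as np

features_num, actions_num = 3, 4
phi_s = np.array([0.2, 0.0, 1.0])
a = 2

# Full construction: |A| * n entries, only action a's block is nonzero.
phi_sa = np.zeros(features_num * actions_num)
phi_sa[a * features_num:(a + 1) * features_num] = phi_s
print(phi_sa)  # [0.  0.  0.  0.  0.  0.  0.2 0.  1.  0.  0.  0. ]

# The snippet form returns the pieces instead of the full vector.
snippet = (phi_s, a * features_num, (a + 1) * features_num)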
befe7cc3ef342188f46848d5e3e8af0435e4196fde4f4534059199383b664138
|
def addNewWeight(self):
'\n Add a new zero weight, corresponding to a newly added feature,\n to all actions.\n '
self.weight_vec = addNewElementForAllActions(self.weight_vec, self.actions_num)
|
Add a new zero weight, corresponding to a newly added feature,
to all actions.
|
rlpy/Representations/Representation.py
|
addNewWeight
|
okkhoy/rlpy
| 265 |
python
|
def addNewWeight(self):
'\n Add a new zero weight, corresponding to a newly added feature,\n to all actions.\n '
self.weight_vec = addNewElementForAllActions(self.weight_vec, self.actions_num)
|
def addNewWeight(self):
'\n Add a new zero weight, corresponding to a newly added feature,\n to all actions.\n '
self.weight_vec = addNewElementForAllActions(self.weight_vec, self.actions_num)<|docstring|>Add a new zero weight, corresponding to a newly added feature,
to all actions.<|endoftext|>
|
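addNewElementForAllActions comes from rlpy's Tools module; a hedged reimplementation under the assumption that it appends one zero to each action's weight block, leaving existing Q-values unchanged:

import numpy as np

def add_new_element_for_all_actions(weight_vec, actions_num):
    # Assumed behaviour: pad every per-action block with a zero weight.
    blocks = weight_vec.reshape(actions_num, -1)
    return np.hstack((blocks, np.zeros((actions_num, 1)))).ravel()

w = np.arange(6.0)  # 2 features x 3 actions
print(add_new_element_for_all_actions(w, 3))  # [0. 1. 0. 2. 3. 0. 4. 5. 0.]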
eb0619cb6dba62dc26905d287fc81cb3983292fda0c25af39e2558f4154e1e31
|
def hashState(self, s):
'\n Returns a unique id for a given state.\n Essentially, enumerate all possible states and return the ID associated\n with *s*.\n\n Under the hood: first, discretize continuous dimensions into bins\n as necessary. Then map the binstate to an integer.\n '
ds = self.binState(s)
return vec2id(ds, self.bins_per_dim)
|
Returns a unique id for a given state.
Essentially, enumerate all possible states and return the ID associated
with *s*.
Under the hood: first, discretize continuous dimensions into bins
as necessary. Then map the binstate to an integer.
|
rlpy/Representations/Representation.py
|
hashState
|
okkhoy/rlpy
| 265 |
python
|
def hashState(self, s):
'\n Returns a unique id for a given state.\n Essentially, enumerate all possible states and return the ID associated\n with *s*.\n\n Under the hood: first, discretize continuous dimensions into bins\n as necessary. Then map the binstate to an integer.\n '
ds = self.binState(s)
return vec2id(ds, self.bins_per_dim)
|
def hashState(self, s):
'\n Returns a unique id for a given state.\n Essentially, enumerate all possible states and return the ID associated\n with *s*.\n\n Under the hood: first, discretize continuous dimensions into bins\n as necessary. Then map the binstate to an integer.\n '
ds = self.binState(s)
return vec2id(ds, self.bins_per_dim)<|docstring|>Returns a unique id for a given state.
Essentially, enumerate all possible states and return the ID associated
with *s*.
Under the hood: first, discretize continuous dimensions into bins
as necessary. Then map the binstate to an integer.<|endoftext|>
|
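vec2id is also an rlpy Tools helper; a sketch assuming the usual mixed-radix enumeration (the actual digit order in rlpy may differ):

import numpy as np

def vec2id_sketch(ds, bins_per_dim):
    # Treat the per-dimension bin indices as digits with bases bins_per_dim.
    sid = 0
    for digit, base in zip(ds, bins_per_dim):
        sid = sid * base + digit
    return sid

bins_per_dim = np.array([10, 4])
print(vec2id_sketch(np.array([3, 2]), bins_per_dim))  # 3 * 4 + 2 = 14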
11f3bfa142d7935a6982109d3553dbd97af399b90598f0af9a4ace92a87059d6
|
def setBinsPerDimension(self, domain, discretization):
'\n Set the number of bins for each dimension of the domain.\n Continuous spaces will be slices using the ``discretization`` parameter.\n :param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn\n :param discretization: The number of bins a continuous domain should be sliced into.\n\n '
self.bins_per_dim = np.zeros(domain.state_space_dims, np.uint16)
self.binWidth_per_dim = np.zeros(domain.state_space_dims)
for d in range(domain.state_space_dims):
if (d in domain.continuous_dims):
self.bins_per_dim[d] = discretization
else:
self.bins_per_dim[d] = (domain.statespace_limits[(d, 1)] - domain.statespace_limits[(d, 0)])
self.binWidth_per_dim[d] = old_div((domain.statespace_limits[(d, 1)] - domain.statespace_limits[(d, 0)]), (self.bins_per_dim[d] * 1.0))
|
Set the number of bins for each dimension of the domain.
Continuous spaces will be sliced using the ``discretization`` parameter.
:param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn
:param discretization: The number of bins a continuous domain should be sliced into.
|
rlpy/Representations/Representation.py
|
setBinsPerDimension
|
okkhoy/rlpy
| 265 |
python
|
def setBinsPerDimension(self, domain, discretization):
'\n Set the number of bins for each dimension of the domain.\n Continuous spaces will be slices using the ``discretization`` parameter.\n :param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn\n :param discretization: The number of bins a continuous domain should be sliced into.\n\n '
self.bins_per_dim = np.zeros(domain.state_space_dims, np.uint16)
self.binWidth_per_dim = np.zeros(domain.state_space_dims)
for d in range(domain.state_space_dims):
if (d in domain.continuous_dims):
self.bins_per_dim[d] = discretization
else:
self.bins_per_dim[d] = (domain.statespace_limits[(d, 1)] - domain.statespace_limits[(d, 0)])
self.binWidth_per_dim[d] = old_div((domain.statespace_limits[(d, 1)] - domain.statespace_limits[(d, 0)]), (self.bins_per_dim[d] * 1.0))
|
def setBinsPerDimension(self, domain, discretization):
'\n Set the number of bins for each dimension of the domain.\n Continuous spaces will be slices using the ``discretization`` parameter.\n :param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn\n :param discretization: The number of bins a continuous domain should be sliced into.\n\n '
self.bins_per_dim = np.zeros(domain.state_space_dims, np.uint16)
self.binWidth_per_dim = np.zeros(domain.state_space_dims)
for d in range(domain.state_space_dims):
if (d in domain.continuous_dims):
self.bins_per_dim[d] = discretization
else:
self.bins_per_dim[d] = (domain.statespace_limits[(d, 1)] - domain.statespace_limits[(d, 0)])
self.binWidth_per_dim[d] = old_div((domain.statespace_limits[(d, 1)] - domain.statespace_limits[(d, 0)]), (self.bins_per_dim[d] * 1.0))<|docstring|>Set the number of bins for each dimension of the domain.
Continuous spaces will be slices using the ``discretization`` parameter.
:param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn
:param discretization: The number of bins a continuous domain should be sliced into.<|endoftext|>
|
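The same loop outside the class, showing how continuous and discrete dimensions are treated differently (limits and discretization are illustrative):

import numpy as np

statespace_limits = np.array([[0.0, 1.0],   # continuous dimension
                              [0.0, 5.0]])  # discrete dimension, 5 values
continuous_dims = [0]
discretization = 20

dims = statespace_limits.shape[0]
bins_per_dim = np.zeros(dims, np.uint16)
binWidth_per_dim = np.zeros(dims)
for d in range(dims):
    if d in continuous_dims:
        bins_per_dim[d] = discretization  # sliced into a fixed number of bins
    else:
        bins_per_dim[d] = statespace_limits[d, 1] - statespace_limits[d, 0]
    binWidth_per_dim[d] = (statespace_limits[d, 1] - statespace_limits[d, 0]) / float(bins_per_dim[d])
print(bins_per_dim, binWidth_per_dim)  # [20  5] [0.05 1.  ]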
9f082aae3888f1d4781707a7acfff4ae5c36dfb7047bd4f8f33b4c282d2ae7c0
|
def binState(self, s):
'\n Returns a vector where each element is the zero-indexed bin number\n corresponding with the given state.\n (See :py:meth:`~rlpy.Representations.Representation.Representation.hashState`)\n Note that this vector will have the same dimensionality as *s*.\n\n (Note: This method is binary compact; the negative case of binary features is\n excluded from feature activation.\n For example, if the domain has a light and the light is off, no feature\n will be added. This is because the very *absence* of the feature\n itself corresponds to the light being off.\n '
s = np.atleast_1d(s)
limits = self.domain.statespace_limits
assert np.all((s >= limits[:, 0]))
assert np.all((s <= limits[:, 1]))
width = (limits[:, 1] - limits[:, 0])
diff = (s - limits[:, 0])
bs = ((diff * self.bins_per_dim) / width).astype('uint32')
m = (bs == self.bins_per_dim)
bs[m] = (self.bins_per_dim[m] - 1)
return bs
|
Returns a vector where each element is the zero-indexed bin number
corresponding with the given state.
(See :py:meth:`~rlpy.Representations.Representation.Representation.hashState`)
Note that this vector will have the same dimensionality as *s*.
(Note: This method is binary compact; the negative case of binary features is
excluded from feature activation.
For example, if the domain has a light and the light is off, no feature
will be added. This is because the very *absence* of the feature
itself corresponds to the light being off.
|
rlpy/Representations/Representation.py
|
binState
|
okkhoy/rlpy
| 265 |
python
|
def binState(self, s):
'\n Returns a vector where each element is the zero-indexed bin number\n corresponding with the given state.\n (See :py:meth:`~rlpy.Representations.Representation.Representation.hashState`)\n Note that this vector will have the same dimensionality as *s*.\n\n (Note: This method is binary compact; the negative case of binary features is\n excluded from feature activation.\n For example, if the domain has a light and the light is off, no feature\n will be added. This is because the very *absence* of the feature\n itself corresponds to the light being off.\n '
s = np.atleast_1d(s)
limits = self.domain.statespace_limits
assert np.all((s >= limits[:, 0]))
assert np.all((s <= limits[:, 1]))
width = (limits[:, 1] - limits[:, 0])
diff = (s - limits[:, 0])
bs = ((diff * self.bins_per_dim) / width).astype('uint32')
m = (bs == self.bins_per_dim)
bs[m] = (self.bins_per_dim[m] - 1)
return bs
|
def binState(self, s):
'\n Returns a vector where each element is the zero-indexed bin number\n corresponding with the given state.\n (See :py:meth:`~rlpy.Representations.Representation.Representation.hashState`)\n Note that this vector will have the same dimensionality as *s*.\n\n (Note: This method is binary compact; the negative case of binary features is\n excluded from feature activation.\n For example, if the domain has a light and the light is off, no feature\n will be added. This is because the very *absence* of the feature\n itself corresponds to the light being off.\n '
s = np.atleast_1d(s)
limits = self.domain.statespace_limits
assert np.all((s >= limits[:, 0]))
assert np.all((s <= limits[:, 1]))
width = (limits[:, 1] - limits[:, 0])
diff = (s - limits[:, 0])
bs = ((diff * self.bins_per_dim) / width).astype('uint32')
m = (bs == self.bins_per_dim)
bs[m] = (self.bins_per_dim[m] - 1)
return bs<|docstring|>Returns a vector where each element is the zero-indexed bin number
corresponding with the given state.
(See :py:meth:`~rlpy.Representations.Representation.Representation.hashState`)
Note that this vector will have the same dimensionality as *s*.
(Note: This method is binary compact; the negative case of binary features is
excluded from feature activation.
For example, if the domain has a light and the light is off, no feature
will be added. This is because the very *absence* of the feature
itself corresponds to the light being off.<|endoftext|>
|
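A standalone version of the binning arithmetic, including the clamp that keeps states on the upper boundary inside the last bin:

import numpy as np

statespace_limits = np.array([[0.0, 1.0], [0.0, 5.0]])
bins_per_dim = np.array([20, 5])

def bin_state(s):
    s = np.atleast_1d(s)
    lo, hi = statespace_limits[:, 0], statespace_limits[:, 1]
    bs = ((s - lo) * bins_per_dim / (hi - lo)).astype('uint32')
    m = bs == bins_per_dim
    bs[m] = bins_per_dim[m] - 1  # upper edge falls into the last bin
    return bs

print(bin_state([0.3, 4.0]))  # [6 4]
print(bin_state([1.0, 5.0]))  # clamped: [19 4]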
f71bce01f8f6835e3edb440467e50d71341552470f979b3b2a88ad7b83fd1a8b
|
def bestActions(self, s, terminal, p_actions, phi_s=None):
'\n Returns a list of the best actions at a given state.\n If *phi_s* [the feature vector at state *s*] is given, it is used to\n speed up code by preventing re-computation within this function.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`\n\n :param s: The given state\n :param terminal: Whether or not the state *s* is a terminal one.\n :param phi_s: (optional) the feature vector at state (s).\n :return: A list of the best actions at the given state.\n\n '
Qs = self.Qs(s, terminal, phi_s)
Qs = Qs[p_actions]
ind = findElemArray1D(Qs, Qs.max())
return np.array(p_actions)[ind]
|
Returns a list of the best actions at a given state.
If *phi_s* [the feature vector at state *s*] is given, it is used to
speed up code by preventing re-computation within this function.
See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`
:param s: The given state
:param terminal: Whether or not the state *s* is a terminal one.
:param phi_s: (optional) the feature vector at state (s).
:return: A list of the best actions at the given state.
|
rlpy/Representations/Representation.py
|
bestActions
|
okkhoy/rlpy
| 265 |
python
|
def bestActions(self, s, terminal, p_actions, phi_s=None):
'\n Returns a list of the best actions at a given state.\n If *phi_s* [the feature vector at state *s*] is given, it is used to\n speed up code by preventing re-computation within this function.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`\n\n :param s: The given state\n :param terminal: Whether or not the state *s* is a terminal one.\n :param phi_s: (optional) the feature vector at state (s).\n :return: A list of the best actions at the given state.\n\n '
Qs = self.Qs(s, terminal, phi_s)
Qs = Qs[p_actions]
ind = findElemArray1D(Qs, Qs.max())
return np.array(p_actions)[ind]
|
def bestActions(self, s, terminal, p_actions, phi_s=None):
'\n Returns a list of the best actions at a given state.\n If *phi_s* [the feature vector at state *s*] is given, it is used to\n speed up code by preventing re-computation within this function.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`\n\n :param s: The given state\n :param terminal: Whether or not the state *s* is a terminal one.\n :param phi_s: (optional) the feature vector at state (s).\n :return: A list of the best actions at the given state.\n\n '
Qs = self.Qs(s, terminal, phi_s)
Qs = Qs[p_actions]
ind = findElemArray1D(Qs, Qs.max())
return np.array(p_actions)[ind]<|docstring|>Returns a list of the best actions at a given state.
If *phi_s* [the feature vector at state *s*] is given, it is used to
speed up code by preventing re-computation within this function.
See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`
:param s: The given state
:param terminal: Whether or not the state *s* is a terminal one.
:param phi_s: (optional) the feature vector at state (s).
:return: A list of the best actions at the given state.<|endoftext|>
|
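findElemArray1D (from rlpy's Tools) gathers every index attaining the maximum; np.where gives the same effect. A sketch of the tie handling:

import numpy as np

Qs = np.array([0.4, 0.9, 0.9, 0.1])  # Q value of every action
p_actions = [0, 1, 2]                 # actions available in this state

q = Qs[p_actions]
ties = np.where(q == q.max())[0]      # all maximisers, not just the first
best = np.array(p_actions)[ties]
print(best)  # [1 2]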
2a1d959b40c2b6f01f250b0e6413d841315242de7baae9c2a27c616bc903f145
|
def pre_discover(self, s, terminal, a, sn, terminaln):
'\n Identifies and adds ("discovers") new features for this adaptive\n representation BEFORE having obtained the TD-Error.\n For example, see :py:class:`~rlpy.Representations.IncrementalTabular.IncrementalTabular`.\n In that class, a new feature is added anytime a novel state is observed.\n\n .. note::\n For adaptive representations that require access to TD-Error to\n determine which features to add next,\n use :py:meth:`~rlpy.Representations.Representation.Representation.post_discover`\n instead.\n\n :param s: The state\n :param terminal: boolean, whether or not *s* is a terminal state.\n :param a: The action\n :param sn: The next state\n :param terminaln: boolean, whether or not *sn* is a terminal state.\n\n :return: The number of new features added to the representation\n '
return 0
|
Identifies and adds ("discovers") new features for this adaptive
representation BEFORE having obtained the TD-Error.
For example, see :py:class:`~rlpy.Representations.IncrementalTabular.IncrementalTabular`.
In that class, a new feature is added anytime a novel state is observed.
.. note::
For adaptive representations that require access to TD-Error to
determine which features to add next,
use :py:meth:`~rlpy.Representations.Representation.Representation.post_discover`
instead.
:param s: The state
:param terminal: boolean, whether or not *s* is a terminal state.
:param a: The action
:param sn: The next state
:param terminaln: boolean, whether or not *sn* is a terminal state.
:return: The number of new features added to the representation
|
rlpy/Representations/Representation.py
|
pre_discover
|
okkhoy/rlpy
| 265 |
python
|
def pre_discover(self, s, terminal, a, sn, terminaln):
'\n Identifies and adds ("discovers") new features for this adaptive\n representation BEFORE having obtained the TD-Error.\n For example, see :py:class:`~rlpy.Representations.IncrementalTabular.IncrementalTabular`.\n In that class, a new feature is added anytime a novel state is observed.\n\n .. note::\n For adaptive representations that require access to TD-Error to\n determine which features to add next,\n use :py:meth:`~rlpy.Representations.Representation.Representation.post_discover`\n instead.\n\n :param s: The state\n :param terminal: boolean, whether or not *s* is a terminal state.\n :param a: The action\n :param sn: The next state\n :param terminaln: boolean, whether or not *sn* is a terminal state.\n\n :return: The number of new features added to the representation\n '
return 0
|
def pre_discover(self, s, terminal, a, sn, terminaln):
'\n Identifies and adds ("discovers") new features for this adaptive\n representation BEFORE having obtained the TD-Error.\n For example, see :py:class:`~rlpy.Representations.IncrementalTabular.IncrementalTabular`.\n In that class, a new feature is added anytime a novel state is observed.\n\n .. note::\n For adaptive representations that require access to TD-Error to\n determine which features to add next,\n use :py:meth:`~rlpy.Representations.Representation.Representation.post_discover`\n instead.\n\n :param s: The state\n :param terminal: boolean, whether or not *s* is a terminal state.\n :param a: The action\n :param sn: The next state\n :param terminaln: boolean, whether or not *sn* is a terminal state.\n\n :return: The number of new features added to the representation\n '
return 0<|docstring|>Identifies and adds ("discovers") new features for this adaptive
representation BEFORE having obtained the TD-Error.
For example, see :py:class:`~rlpy.Representations.IncrementalTabular.IncrementalTabular`.
In that class, a new feature is added anytime a novel state is observed.
.. note::
For adaptive representations that require access to TD-Error to
determine which features to add next,
use :py:meth:`~rlpy.Representations.Representation.Representation.post_discover`
instead.
:param s: The state
:param terminal: boolean, whether or not *s* is a terminal state.
:param a: The action
:param sn: The next state
:param terminaln: boolean, whether or not *sn* is a terminal state.
:return: The number of new features added to the representation<|endoftext|>
|
5e7bbb966cb1801f6d050a569469018d7361bb7f3c90a01f994bd0f9d1233785
|
def post_discover(self, s, terminal, a, td_error, phi_s):
'\n Identifies and adds ("discovers") new features for this adaptive\n representation AFTER having obtained the TD-Error.\n For example, see :py:class:`~rlpy.Representations.iFDD.iFDD`.\n In that class, a new feature is added based on regions of high TD-Error.\n\n .. note::\n For adaptive representations that do not require access to TD-Error\n to determine which features to add next, you may\n use :py:meth:`~rlpy.Representations.Representation.Representation.pre_discover`\n instead.\n\n :param s: The state\n :param terminal: boolean, whether or not *s* is a terminal state.\n :param a: The action\n :param td_error: The temporal difference error at this transition.\n :param phi_s: The feature vector evaluated at state *s*.\n\n :return: The number of new features added to the representation\n '
return 0
|
Identifies and adds ("discovers") new features for this adaptive
representation AFTER having obtained the TD-Error.
For example, see :py:class:`~rlpy.Representations.iFDD.iFDD`.
In that class, a new feature is added based on regions of high TD-Error.
.. note::
For adaptive representations that do not require access to TD-Error
to determine which features to add next, you may
use :py:meth:`~rlpy.Representations.Representation.Representation.pre_discover`
instead.
:param s: The state
:param terminal: boolean, whether or not *s* is a terminal state.
:param a: The action
:param td_error: The temporal difference error at this transition.
:param phi_s: The feature vector evaluated at state *s*.
:return: The number of new features added to the representation
|
rlpy/Representations/Representation.py
|
post_discover
|
okkhoy/rlpy
| 265 |
python
|
def post_discover(self, s, terminal, a, td_error, phi_s):
'\n Identifies and adds ("discovers") new features for this adaptive\n representation AFTER having obtained the TD-Error.\n For example, see :py:class:`~rlpy.Representations.iFDD.iFDD`.\n In that class, a new feature is added based on regions of high TD-Error.\n\n .. note::\n For adaptive representations that do not require access to TD-Error\n to determine which features to add next, you may\n use :py:meth:`~rlpy.Representations.Representation.Representation.pre_discover`\n instead.\n\n :param s: The state\n :param terminal: boolean, whether or not *s* is a terminal state.\n :param a: The action\n :param td_error: The temporal difference error at this transition.\n :param phi_s: The feature vector evaluated at state *s*.\n\n :return: The number of new features added to the representation\n '
return 0
|
def post_discover(self, s, terminal, a, td_error, phi_s):
'\n Identifies and adds ("discovers") new features for this adaptive\n representation AFTER having obtained the TD-Error.\n For example, see :py:class:`~rlpy.Representations.iFDD.iFDD`.\n In that class, a new feature is added based on regions of high TD-Error.\n\n .. note::\n For adaptive representations that do not require access to TD-Error\n to determine which features to add next, you may\n use :py:meth:`~rlpy.Representations.Representation.Representation.pre_discover`\n instead.\n\n :param s: The state\n :param terminal: boolean, whether or not *s* is a terminal state.\n :param a: The action\n :param td_error: The temporal difference error at this transition.\n :param phi_s: The feature vector evaluated at state *s*.\n\n :return: The number of new features added to the representation\n '
return 0<|docstring|>Identifies and adds ("discovers") new features for this adaptive
representation AFTER having obtained the TD-Error.
For example, see :py:class:`~rlpy.Representations.iFDD.iFDD`.
In that class, a new feature is added based on regions of high TD-Error.
.. note::
For adaptive representations that do not require access to TD-Error
to determine which features to add next, you may
use :py:meth:`~rlpy.Representations.Representation.Representation.pre_discover`
instead.
:param s: The state
:param terminal: boolean, whether or not *s* is a terminal state.
:param a: The action
:param td_error: The temporal difference error at this transition.
:param phi_s: The feature vector evaluated at state *s*.
:return: The number of new features added to the representation<|endoftext|>
|
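A hedged sketch of a subclass wiring both hooks: pre_discover grows features from novel states (in the spirit of IncrementalTabular), post_discover from high TD-error (in the spirit of iFDD). The threshold is an assumption for illustration.

class AdaptiveRepresentationSketch:
    def __init__(self):
        self.seen = set()

    def pre_discover(self, s, terminal, a, sn, terminaln):
        added = 0
        for state in (tuple(s), tuple(sn)):
            if state not in self.seen:   # novel state -> new feature
                self.seen.add(state)
                added += 1
        return added

    def post_discover(self, s, terminal, a, td_error, phi_s):
        # Expand only where the TD-error is large (threshold assumed).
        return 1 if abs(td_error) > 1.0 else 0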
d43a389426735e661f366e2aa5bd1786736fe9c504b57a9e5f8a8b24af712e47
|
def bestAction(self, s, terminal, p_actions, phi_s=None):
'\n Returns the best action at a given state.\n If there are multiple best actions, this method selects one of them\n uniformly randomly.\n If *phi_s* [the feature vector at state *s*] is given, it is used to\n speed up code by preventing re-computation within this function.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestActions`\n\n :param s: The given state\n :param terminal: Whether or not the state *s* is a terminal one.\n :param phi_s: (optional) the feature vector at state (s).\n :return: The best action at the given state.\n '
bestA = self.bestActions(s, terminal, p_actions, phi_s)
if isinstance(bestA, int):
return bestA
elif (len(bestA) > 1):
return self.random_state.choice(bestA)
else:
return bestA[0]
|
Returns the best action at a given state.
If there are multiple best actions, this method selects one of them
uniformly randomly.
If *phi_s* [the feature vector at state *s*] is given, it is used to
speed up code by preventing re-computation within this function.
See :py:meth:`~rlpy.Representations.Representation.Representation.bestActions`
:param s: The given state
:param terminal: Whether or not the state *s* is a terminal one.
:param phi_s: (optional) the feature vector at state (s).
:return: The best action at the given state.
|
rlpy/Representations/Representation.py
|
bestAction
|
okkhoy/rlpy
| 265 |
python
|
def bestAction(self, s, terminal, p_actions, phi_s=None):
'\n Returns the best action at a given state.\n If there are multiple best actions, this method selects one of them\n uniformly randomly.\n If *phi_s* [the feature vector at state *s*] is given, it is used to\n speed up code by preventing re-computation within this function.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestActions`\n\n :param s: The given state\n :param terminal: Whether or not the state *s* is a terminal one.\n :param phi_s: (optional) the feature vector at state (s).\n :return: The best action at the given state.\n '
bestA = self.bestActions(s, terminal, p_actions, phi_s)
if isinstance(bestA, int):
return bestA
elif (len(bestA) > 1):
return self.random_state.choice(bestA)
else:
return bestA[0]
|
def bestAction(self, s, terminal, p_actions, phi_s=None):
'\n Returns the best action at a given state.\n If there are multiple best actions, this method selects one of them\n uniformly randomly.\n If *phi_s* [the feature vector at state *s*] is given, it is used to\n speed up code by preventing re-computation within this function.\n\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestActions`\n\n :param s: The given state\n :param terminal: Whether or not the state *s* is a terminal one.\n :param phi_s: (optional) the feature vector at state (s).\n :return: The best action at the given state.\n '
bestA = self.bestActions(s, terminal, p_actions, phi_s)
if isinstance(bestA, int):
return bestA
elif (len(bestA) > 1):
return self.random_state.choice(bestA)
else:
return bestA[0]<|docstring|>Returns the best action at a given state.
If there are multiple best actions, this method selects one of them
uniformly randomly.
If *phi_s* [the feature vector at state *s*] is given, it is used to
speed up code by preventing re-computation within this function.
See :py:meth:`~rlpy.Representations.Representation.Representation.bestActions`
:param s: The given state
:param terminal: Whether or not the state *s* is a terminal one.
:param phi_s: (optional) the feature vector at state (s).
:return: The best action at the given state.<|endoftext|>
|
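The tie-breaking step in isolation; random_state mirrors the attribute used above:

import numpy as np

random_state = np.random.RandomState(0)
bestA = np.array([1, 2])  # result of bestActions with a tie

# Choose uniformly at random among the tied best actions.
action = random_state.choice(bestA) if len(bestA) > 1 else bestA[0]
print(action)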
031760b9626b9d5211fb05851b405656b124b942718050289497fc3798537df5
|
def phi_nonTerminal(self, s):
' *Abstract Method* \n\n Returns the feature vector evaluated at state *s* for non-terminal\n states; see\n function :py:meth:`~rlpy.Representations.Representation.Representation.phi`\n for the general case.\n\n :param s: The given state\n\n :return: The feature vector evaluated at state *s*.\n '
raise NotImplementedError
|
*Abstract Method*
Returns the feature vector evaluated at state *s* for non-terminal
states; see
function :py:meth:`~rlpy.Representations.Representation.Representation.phi`
for the general case.
:param s: The given state
:return: The feature vector evaluated at state *s*.
|
rlpy/Representations/Representation.py
|
phi_nonTerminal
|
okkhoy/rlpy
| 265 |
python
|
def phi_nonTerminal(self, s):
' *Abstract Method* \n\n Returns the feature vector evaluated at state *s* for non-terminal\n states; see\n function :py:meth:`~rlpy.Representations.Representation.Representation.phi`\n for the general case.\n\n :param s: The given state\n\n :return: The feature vector evaluated at state *s*.\n '
raise NotImplementedError
|
def phi_nonTerminal(self, s):
' *Abstract Method* \n\n Returns the feature vector evaluated at state *s* for non-terminal\n states; see\n function :py:meth:`~rlpy.Representations.Representation.Representation.phi`\n for the general case.\n\n :param s: The given state\n\n :return: The feature vector evaluated at state *s*.\n '
raise NotImplementedError<|docstring|>*Abstract Method*
Returns the feature vector evaluated at state *s* for non-terminal
states; see
function :py:meth:`~rlpy.Representations.Representation.Representation.phi`
for the general case.
:param s: The given state
:return: The feature vector evaluated at state *s*.<|endoftext|>
|
1dfd07f6ce917d59e868edf15bee7c876d163cc3c1eeabe4c6a11de9ef54db09
|
def activeInitialFeatures(self, s):
'\n Returns the index of active initial features based on bins in each\n dimension.\n :param s: The state\n\n :return: The active initial features of this representation\n (before expansion)\n '
bs = self.binState(s)
shifts = np.hstack((0, np.cumsum(self.bins_per_dim)[:(- 1)]))
index = (bs + shifts)
return index.astype('uint32')
|
Returns the index of active initial features based on bins in each
dimension.
:param s: The state
:return: The active initial features of this representation
(before expansion)
|
rlpy/Representations/Representation.py
|
activeInitialFeatures
|
okkhoy/rlpy
| 265 |
python
|
def activeInitialFeatures(self, s):
'\n Returns the index of active initial features based on bins in each\n dimension.\n :param s: The state\n\n :return: The active initial features of this representation\n (before expansion)\n '
bs = self.binState(s)
shifts = np.hstack((0, np.cumsum(self.bins_per_dim)[:(- 1)]))
index = (bs + shifts)
return index.astype('uint32')
|
def activeInitialFeatures(self, s):
'\n Returns the index of active initial features based on bins in each\n dimension.\n :param s: The state\n\n :return: The active initial features of this representation\n (before expansion)\n '
bs = self.binState(s)
shifts = np.hstack((0, np.cumsum(self.bins_per_dim)[:(- 1)]))
index = (bs + shifts)
return index.astype('uint32')<|docstring|>Returns the index of active initial features based on bins in each
dimension.
:param s: The state
:return: The active initial features of this representation
(before expansion)<|endoftext|>
|
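The index arithmetic made explicit: each dimension owns a contiguous block of indicator features, offset by the bin counts of the dimensions before it.

import numpy as np

bins_per_dim = np.array([20, 5])
bs = np.array([6, 4])  # bin of the current state in each dimension

shifts = np.hstack((0, np.cumsum(bins_per_dim)[:-1]))  # block offsets [0, 20]
index = (bs + shifts).astype('uint32')
print(index)  # [ 6 24] -- one active feature per dimension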
0b8250026e8f89c0f3b790661abdec6e341da4522fdbbf4b6311906098610408
|
def batchPhi_s_a(self, all_phi_s, all_actions, all_phi_s_a=None, use_sparse=False):
'\n Builds the feature vector for a series of state-action pairs (s,a)\n using the copy-paste method.\n\n .. note::\n See :py:meth:`~rlpy.Representations.Representation.Representation.phi_sa`\n for more information.\n\n :param all_phi_s: The feature vectors evaluated at a series of states.\n Has dimension *p* x *n*, where *p* is the number of states\n (indexed by row), and *n* is the number of features.\n :param all_actions: The set of actions corresponding to each feature.\n Dimension *p* x *1*, where *p* is the number of states included\n in this batch.\n :param all_phi_s_a: (Optional) Feature vector for a series of\n state-action pairs (s,a) using the copy-paste method.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n :param use_sparse: Determines whether or not to use sparse matrix\n libraries provided with numpy.\n\n\n :return: all_phi_s_a (of dimension p x (s_a) )\n '
(p, n) = all_phi_s.shape
a_num = self.actions_num
if use_sparse:
phi_s_a = sp.lil_matrix((p, (n * a_num)), dtype=all_phi_s.dtype)
else:
phi_s_a = np.zeros((p, (n * a_num)), dtype=all_phi_s.dtype)
for i in range(a_num):
rows = np.where((all_actions == i))[0]
if len(rows):
phi_s_a[rows, (i * n):((i + 1) * n)] = all_phi_s[rows, :]
return phi_s_a
|
Builds the feature vector for a series of state-action pairs (s,a)
using the copy-paste method.
.. note::
See :py:meth:`~rlpy.Representations.Representation.Representation.phi_sa`
for more information.
:param all_phi_s: The feature vectors evaluated at a series of states.
Has dimension *p* x *n*, where *p* is the number of states
(indexed by row), and *n* is the number of features.
:param all_actions: The set of actions corresponding to each feature.
Dimension *p* x *1*, where *p* is the number of states included
in this batch.
:param all_phi_s_a: (Optional) Feature vector for a series of
state-action pairs (s,a) using the copy-paste method.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:param use_sparse: Determines whether or not to use sparse matrix
libraries provided with numpy.
:return: all_phi_s_a (of dimension p x (s_a) )
|
rlpy/Representations/Representation.py
|
batchPhi_s_a
|
okkhoy/rlpy
| 265 |
python
|
def batchPhi_s_a(self, all_phi_s, all_actions, all_phi_s_a=None, use_sparse=False):
'\n Builds the feature vector for a series of state-action pairs (s,a)\n using the copy-paste method.\n\n .. note::\n See :py:meth:`~rlpy.Representations.Representation.Representation.phi_sa`\n for more information.\n\n :param all_phi_s: The feature vectors evaluated at a series of states.\n Has dimension *p* x *n*, where *p* is the number of states\n (indexed by row), and *n* is the number of features.\n :param all_actions: The set of actions corresponding to each feature.\n Dimension *p* x *1*, where *p* is the number of states included\n in this batch.\n :param all_phi_s_a: (Optional) Feature vector for a series of\n state-action pairs (s,a) using the copy-paste method.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n :param use_sparse: Determines whether or not to use sparse matrix\n libraries provided with numpy.\n\n\n :return: all_phi_s_a (of dimension p x (s_a) )\n '
(p, n) = all_phi_s.shape
a_num = self.actions_num
if use_sparse:
phi_s_a = sp.lil_matrix((p, (n * a_num)), dtype=all_phi_s.dtype)
else:
phi_s_a = np.zeros((p, (n * a_num)), dtype=all_phi_s.dtype)
for i in range(a_num):
rows = np.where((all_actions == i))[0]
if len(rows):
phi_s_a[rows, (i * n):((i + 1) * n)] = all_phi_s[rows, :]
return phi_s_a
|
def batchPhi_s_a(self, all_phi_s, all_actions, all_phi_s_a=None, use_sparse=False):
'\n Builds the feature vector for a series of state-action pairs (s,a)\n using the copy-paste method.\n\n .. note::\n See :py:meth:`~rlpy.Representations.Representation.Representation.phi_sa`\n for more information.\n\n :param all_phi_s: The feature vectors evaluated at a series of states.\n Has dimension *p* x *n*, where *p* is the number of states\n (indexed by row), and *n* is the number of features.\n :param all_actions: The set of actions corresponding to each feature.\n Dimension *p* x *1*, where *p* is the number of states included\n in this batch.\n :param all_phi_s_a: (Optional) Feature vector for a series of\n state-action pairs (s,a) using the copy-paste method.\n If the feature vector phi(s) has already been cached,\n pass it here as input so that it need not be computed again.\n :param use_sparse: Determines whether or not to use sparse matrix\n libraries provided with numpy.\n\n\n :return: all_phi_s_a (of dimension p x (s_a) )\n '
(p, n) = all_phi_s.shape
a_num = self.actions_num
if use_sparse:
phi_s_a = sp.lil_matrix((p, (n * a_num)), dtype=all_phi_s.dtype)
else:
phi_s_a = np.zeros((p, (n * a_num)), dtype=all_phi_s.dtype)
for i in range(a_num):
rows = np.where((all_actions == i))[0]
if len(rows):
phi_s_a[rows, (i * n):((i + 1) * n)] = all_phi_s[rows, :]
return phi_s_a<|docstring|>Builds the feature vector for a series of state-action pairs (s,a)
using the copy-paste method.
.. note::
See :py:meth:`~rlpy.Representations.Representation.Representation.phi_sa`
for more information.
:param all_phi_s: The feature vectors evaluated at a series of states.
Has dimension *p* x *n*, where *p* is the number of states
(indexed by row), and *n* is the number of features.
:param all_actions: The set of actions corresponding to each feature.
Dimension *p* x *1*, where *p* is the number of states included
in this batch.
:param all_phi_s_a: (Optional) Feature vector for a series of
state-action pairs (s,a) using the copy-paste method.
If the feature vector phi(s) has already been cached,
pass it here as input so that it need not be computed again.
:param use_sparse: Determines whether or not to use sparse matrix
libraries provided with numpy.
:return: all_phi_s_a (of dimension p x (s_a) )<|endoftext|>
|
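The dense branch of the batch copy-paste in isolation: row i receives its phi(s) in the column block of action all_actions[i].

import numpy as np

p, n, a_num = 3, 2, 2
all_phi_s = np.arange(p * n, dtype=float).reshape(p, n)
all_actions = np.array([0, 1, 0])

phi_s_a = np.zeros((p, n * a_num))
for i in range(a_num):
    rows = np.where(all_actions == i)[0]
    if len(rows):
        phi_s_a[rows, i * n:(i + 1) * n] = all_phi_s[rows, :]
print(phi_s_a)
# [[0. 1. 0. 0.]
#  [0. 0. 2. 3.]
#  [4. 5. 0. 0.]]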
413d1fb9e9b0859ff2796a2926d31acd738ee2adf2746726328150930ab230ff
|
def batchBestAction(self, all_s, all_phi_s, action_mask=None, useSparse=True):
'\n Accepts a batch of states, returns the best action associated with each.\n\n .. note::\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`\n\n :param all_s: An array of all the states to consider.\n :param all_phi_s: The feature vectors evaluated at a series of states.\n Has dimension *p* x *n*, where *p* is the number of states\n (indexed by row), and *n* is the number of features.\n :param action_mask: (optional) a *p* x *|A|* mask on the possible\n actions to consider, where *|A|* is the size of the action space.\n The mask is a binary 2-d array, where 1 indicates an active mask\n (action is unavailable) while 0 indicates a possible action.\n :param useSparse: Determines whether or not to use sparse matrix\n libraries provided with numpy.\n\n :return: An array of the best action associated with each state.\n\n '
(p, n) = all_phi_s.shape
a_num = self.actions_num
if (action_mask is None):
action_mask = np.ones((p, a_num))
for (i, s) in enumerate(all_s):
action_mask[(i, self.domain.possibleActions(s))] = 0
a_num = self.actions_num
if useSparse:
all_phi_s_a = sp.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = (all_phi_s_a * self.weight_vec.reshape((- 1), 1))
else:
all_phi_s_a = np.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = np.dot(all_phi_s_a, self.weight_vec.T)
all_q_s_a = all_q_s_a.reshape((a_num, (- 1))).T
all_q_s_a = np.ma.masked_array(all_q_s_a, mask=action_mask)
best_action = np.argmax(all_q_s_a, axis=1)
phi_s_a = self.batchPhi_s_a(all_phi_s, best_action, all_phi_s_a, useSparse)
return (best_action, phi_s_a, action_mask)
|
Accepts a batch of states, returns the best action associated with each.
.. note::
See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`
:param all_s: An array of all the states to consider.
:param all_phi_s: The feature vectors evaluated at a series of states.
Has dimension *p* x *n*, where *p* is the number of states
(indexed by row), and *n* is the number of features.
:param action_mask: (optional) a *p* x *|A|* mask on the possible
actions to consider, where *|A|* is the size of the action space.
The mask is a binary 2-d array, where 1 indicates an active mask
(action is unavailable) while 0 indicates a possible action.
:param useSparse: Determines whether or not to use sparse matrix
libraries provided with numpy.
:return: An array of the best action associated with each state.
|
rlpy/Representations/Representation.py
|
batchBestAction
|
okkhoy/rlpy
| 265 |
python
|
def batchBestAction(self, all_s, all_phi_s, action_mask=None, useSparse=True):
'\n Accepts a batch of states, returns the best action associated with each.\n\n .. note::\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`\n\n :param all_s: An array of all the states to consider.\n :param all_phi_s: The feature vectors evaluated at a series of states.\n Has dimension *p* x *n*, where *p* is the number of states\n (indexed by row), and *n* is the number of features.\n :param action_mask: (optional) a *p* x *|A|* mask on the possible\n actions to consider, where *|A|* is the size of the action space.\n The mask is a binary 2-d array, where 1 indicates an active mask\n (action is unavailable) while 0 indicates a possible action.\n :param useSparse: Determines whether or not to use sparse matrix\n libraries provided with numpy.\n\n :return: An array of the best action associated with each state.\n\n '
(p, n) = all_phi_s.shape
a_num = self.actions_num
if (action_mask is None):
action_mask = np.ones((p, a_num))
for (i, s) in enumerate(all_s):
action_mask[(i, self.domain.possibleActions(s))] = 0
a_num = self.actions_num
if useSparse:
all_phi_s_a = sp.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = (all_phi_s_a * self.weight_vec.reshape((- 1), 1))
else:
all_phi_s_a = np.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = np.dot(all_phi_s_a, self.weight_vec.T)
all_q_s_a = all_q_s_a.reshape((a_num, (- 1))).T
all_q_s_a = np.ma.masked_array(all_q_s_a, mask=action_mask)
best_action = np.argmax(all_q_s_a, axis=1)
phi_s_a = self.batchPhi_s_a(all_phi_s, best_action, all_phi_s_a, useSparse)
return (best_action, phi_s_a, action_mask)
|
def batchBestAction(self, all_s, all_phi_s, action_mask=None, useSparse=True):
'\n Accepts a batch of states, returns the best action associated with each.\n\n .. note::\n See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`\n\n :param all_s: An array of all the states to consider.\n :param all_phi_s: The feature vectors evaluated at a series of states.\n Has dimension *p* x *n*, where *p* is the number of states\n (indexed by row), and *n* is the number of features.\n :param action_mask: (optional) a *p* x *|A|* mask on the possible\n actions to consider, where *|A|* is the size of the action space.\n The mask is a binary 2-d array, where 1 indicates an active mask\n (action is unavailable) while 0 indicates a possible action.\n :param useSparse: Determines whether or not to use sparse matrix\n libraries provided with numpy.\n\n :return: An array of the best action associated with each state.\n\n '
(p, n) = all_phi_s.shape
a_num = self.actions_num
if (action_mask is None):
action_mask = np.ones((p, a_num))
for (i, s) in enumerate(all_s):
action_mask[(i, self.domain.possibleActions(s))] = 0
a_num = self.actions_num
if useSparse:
all_phi_s_a = sp.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = (all_phi_s_a * self.weight_vec.reshape((- 1), 1))
else:
all_phi_s_a = np.kron(np.eye(a_num, a_num), all_phi_s)
all_q_s_a = np.dot(all_phi_s_a, self.weight_vec.T)
all_q_s_a = all_q_s_a.reshape((a_num, (- 1))).T
all_q_s_a = np.ma.masked_array(all_q_s_a, mask=action_mask)
best_action = np.argmax(all_q_s_a, axis=1)
phi_s_a = self.batchPhi_s_a(all_phi_s, best_action, all_phi_s_a, useSparse)
return (best_action, phi_s_a, action_mask)<|docstring|>Accepts a batch of states, returns the best action associated with each.
.. note::
See :py:meth:`~rlpy.Representations.Representation.Representation.bestAction`
:param all_s: An array of all the states to consider.
:param all_phi_s: The feature vectors evaluated at a series of states.
Has dimension *p* x *n*, where *p* is the number of states
(indexed by row), and *n* is the number of features.
:param action_mask: (optional) a *p* x *|A|* mask on the possible
actions to consider, where *|A|* is the size of the action space.
The mask is a binary 2-d array, where 1 indicates an active mask
(action is unavailable) while 0 indicates a possible action.
:param useSparse: Determines whether or not to use sparse matrix
libraries provided with numpy.
:return: An array of the best action associated with each state.<|endoftext|>
|
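The masked argmax at the heart of batchBestAction: a 1 in the mask marks an unavailable action, which np.ma then ignores. Values here are illustrative.

import numpy as np

all_q_s_a = np.array([[0.2, 0.9, 0.5],
                      [0.7, 0.1, 0.8]])
action_mask = np.array([[0, 0, 1],    # action 2 unavailable in state 0
                        [0, 1, 0]])   # action 1 unavailable in state 1

masked = np.ma.masked_array(all_q_s_a, mask=action_mask)
best_action = np.argmax(masked, axis=1)
print(best_action)  # [1 2]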
1519cc5822d49d0439fef2820bd5e1286f17d0572d4c2fb5130efb8082aee9c7
|
def featureType(self):
" *Abstract Method* \n\n Return the data type for the underlying features (eg 'float').\n "
raise NotImplementedError
|
*Abstract Method*
Return the data type for the underlying features (eg 'float').
|
rlpy/Representations/Representation.py
|
featureType
|
okkhoy/rlpy
| 265 |
python
|
def featureType(self):
" *Abstract Method* \n\n Return the data type for the underlying features (eg 'float').\n "
raise NotImplementedError
|
def featureType(self):
" *Abstract Method* \n\n Return the data type for the underlying features (eg 'float').\n "
raise NotImplementedError<|docstring|>*Abstract Method*
Return the data type for the underlying features (eg 'float').<|endoftext|>
|
2948ddd17fde8b1acacc4b615a8b2c773ff8bbb8970527791dcfa7281f16c139
|
def Q_oneStepLookAhead(self, s, a, ns_samples, policy=None):
'\n Returns the state action value, Q(s,a), by performing one step\n look-ahead on the domain.\n\n .. note::\n For an example of how this function works, see\n `Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n If a policy is passed (used in the policy evaluation), it is used to\n generate the action for the next state.\n Otherwise the best action is selected.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param a: The given action\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n :param policy: (optional) Used to select the action in the next state\n (*after* taking action a) when estimating the one_step look-aghead.\n If ``policy == None``, the best action will be selected.\n\n :return: The one-step lookahead state-action value, Q(s,a).\n '
self.continuous_state_starting_samples = 10
if hasFunction(self, 'addState'):
self.addState(s)
discount_factor = self.domain.discount_factor
if hasFunction(self.domain, 'expectedStep'):
(p, r, ns, t, p_actions) = self.domain.expectedStep(s, a)
Q = 0
for j in range(len(p)):
if (policy is None):
Q += (p[(j, 0)] * (r[(j, 0)] + (discount_factor * self.V(ns[(j, :)], t[(j, :)], p_actions[j]))))
elif len(self.domain.possibleActions(ns[(j, :)])):
na = policy.pi(ns[(j, :)], t[(j, :)], self.domain.possibleActions(ns[(j, :)]))
Q += (p[(j, 0)] * (r[(j, 0)] + (discount_factor * self.Q(ns[(j, :)], t[(j, :)], na))))
else:
key = tuple(np.hstack((s, [a])))
cacheHit = self.expectedStepCached.get(key)
if (cacheHit is None):
s = self.stateInTheMiddleOfGrid(s)
if len(self.domain.continuous_dims):
next_states = np.empty((ns_samples, self.domain.state_space_dims))
rewards = np.empty(ns_samples)
ns_samples_ = old_div(ns_samples, self.continuous_state_starting_samples)
for i in range(self.continuous_state_starting_samples):
new_s = s.copy()
for d in range(self.domain.state_space_dims):
w = self.binWidth_per_dim[d]
new_s[d] = (((self.random_state.rand() - 0.5) * w) + s[d])
if (not (d in self.domain.continuous_dims)):
new_s[d] = int(new_s[d])
(ns, r) = self.domain.sampleStep(new_s, a, ns_samples_)
next_states[((i * ns_samples_):((i + 1) * ns_samples_), :)] = ns
rewards[(i * ns_samples_):((i + 1) * ns_samples_)] = r
else:
(next_states, rewards) = self.domain.sampleStep(s, a, ns_samples)
self.expectedStepCached[key] = [next_states, rewards]
else:
(next_states, rewards) = cacheHit
if (policy is None):
Q = np.mean([(rewards[i] + (discount_factor * self.V(next_states[(i, :)]))) for i in range(ns_samples)])
else:
Q = np.mean([(rewards[i] + (discount_factor * self.Q(next_states[(i, :)], policy.pi(next_states[(i, :)])))) for i in range(ns_samples)])
return Q
|
Returns the state action value, Q(s,a), by performing one step
look-ahead on the domain.
.. note::
For an example of how this function works, see
`Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expectedStep()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
If a policy is passed (used in the policy evaluation), it is used to
generate the action for the next state.
Otherwise the best action is selected.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param a: The given action
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:param policy: (optional) Used to select the action in the next state
(*after* taking action a) when estimating the one_step look-ahead.
If ``policy == None``, the best action will be selected.
:return: The one-step lookahead state-action value, Q(s,a).
|
rlpy/Representations/Representation.py
|
Q_oneStepLookAhead
|
okkhoy/rlpy
| 265 |
python
|
def Q_oneStepLookAhead(self, s, a, ns_samples, policy=None):
'\n Returns the state action value, Q(s,a), by performing one step\n look-ahead on the domain.\n\n .. note::\n For an example of how this function works, see\n `Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n If a policy is passed (used in the policy evaluation), it is used to\n generate the action for the next state.\n Otherwise the best action is selected.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param a: The given action\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n :param policy: (optional) Used to select the action in the next state\n (*after* taking action a) when estimating the one_step look-ahead.\n If ``policy == None``, the best action will be selected.\n\n :return: The one-step lookahead state-action value, Q(s,a).\n '
self.continuous_state_starting_samples = 10
if hasFunction(self, 'addState'):
self.addState(s)
discount_factor = self.domain.discount_factor
if hasFunction(self.domain, 'expectedStep'):
(p, r, ns, t, p_actions) = self.domain.expectedStep(s, a)
Q = 0
for j in range(len(p)):
if (policy is None):
Q += (p[(j, 0)] * (r[(j, 0)] + (discount_factor * self.V(ns[(j, :)], t[(j, :)], p_actions[j]))))
elif len(self.domain.possibleActions(ns[(j, :)])):
na = policy.pi(ns[(j, :)], t[(j, :)], self.domain.possibleActions(ns[(j, :)]))
Q += (p[(j, 0)] * (r[(j, 0)] + (discount_factor * self.Q(ns[(j, :)], t[(j, :)], na))))
else:
key = tuple(np.hstack((s, [a])))
cacheHit = self.expectedStepCached.get(key)
if (cacheHit is None):
s = self.stateInTheMiddleOfGrid(s)
if len(self.domain.continuous_dims):
next_states = np.empty((ns_samples, self.domain.state_space_dims))
rewards = np.empty(ns_samples)
ns_samples_ = old_div(ns_samples, self.continuous_state_starting_samples)
for i in range(self.continuous_state_starting_samples):
new_s = s.copy()
for d in range(self.domain.state_space_dims):
w = self.binWidth_per_dim[d]
new_s[d] = (((self.random_state.rand() - 0.5) * w) + s[d])
if (not (d in self.domain.continuous_dims)):
new_s[d] = int(new_s[d])
(ns, r) = self.domain.sampleStep(new_s, a, ns_samples_)
next_states[((i * ns_samples_):((i + 1) * ns_samples_), :)] = ns
rewards[(i * ns_samples_):((i + 1) * ns_samples_)] = r
else:
(next_states, rewards) = self.domain.sampleStep(s, a, ns_samples)
self.expectedStepCached[key] = [next_states, rewards]
else:
(next_states, rewards) = cacheHit
if (policy is None):
Q = np.mean([(rewards[i] + (discount_factor * self.V(next_states[(i, :)]))) for i in range(ns_samples)])
else:
Q = np.mean([(rewards[i] + (discount_factor * self.Q(next_states[(i, :)], policy.pi(next_states[(i, :)])))) for i in range(ns_samples)])
return Q
|
def Q_oneStepLookAhead(self, s, a, ns_samples, policy=None):
'\n Returns the state action value, Q(s,a), by performing one step\n look-ahead on the domain.\n\n .. note::\n For an example of how this function works, see\n `Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n If a policy is passed (used in the policy evaluation), it is used to\n generate the action for the next state.\n Otherwise the best action is selected.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param a: The given action\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n :param policy: (optional) Used to select the action in the next state\n (*after* taking action a) when estimating the one_step look-ahead.\n If ``policy == None``, the best action will be selected.\n\n :return: The one-step lookahead state-action value, Q(s,a).\n '
self.continuous_state_starting_samples = 10
if hasFunction(self, 'addState'):
self.addState(s)
discount_factor = self.domain.discount_factor
if hasFunction(self.domain, 'expectedStep'):
(p, r, ns, t, p_actions) = self.domain.expectedStep(s, a)
Q = 0
for j in range(len(p)):
if (policy is None):
Q += (p[(j, 0)] * (r[(j, 0)] + (discount_factor * self.V(ns[(j, :)], t[(j, :)], p_actions[j]))))
elif len(self.domain.possibleActions(ns[(j, :)])):
na = policy.pi(ns[(j, :)], t[(j, :)], self.domain.possibleActions(ns[(j, :)]))
Q += (p[(j, 0)] * (r[(j, 0)] + (discount_factor * self.Q(ns[(j, :)], t[(j, :)], na))))
else:
key = tuple(np.hstack((s, [a])))
cacheHit = self.expectedStepCached.get(key)
if (cacheHit is None):
s = self.stateInTheMiddleOfGrid(s)
if len(self.domain.continuous_dims):
next_states = np.empty((ns_samples, self.domain.state_space_dims))
rewards = np.empty(ns_samples)
ns_samples_ = old_div(ns_samples, self.continuous_state_starting_samples)
for i in range(self.continuous_state_starting_samples):
new_s = s.copy()
for d in range(self.domain.state_space_dims):
w = self.binWidth_per_dim[d]
new_s[d] = (((self.random_state.rand() - 0.5) * w) + s[d])
if (not (d in self.domain.continuous_dims)):
new_s[d] = int(new_s[d])
(ns, r) = self.domain.sampleStep(new_s, a, ns_samples_)
next_states[((i * ns_samples_):((i + 1) * ns_samples_), :)] = ns
rewards[(i * ns_samples_):((i + 1) * ns_samples_)] = r
else:
(next_states, rewards) = self.domain.sampleStep(s, a, ns_samples)
self.expectedStepCached[key] = [next_states, rewards]
else:
(next_states, rewards) = cacheHit
if (policy is None):
Q = np.mean([(rewards[i] + (discount_factor * self.V(next_states[(i, :)]))) for i in range(ns_samples)])
else:
Q = np.mean([(rewards[i] + (discount_factor * self.Q(next_states[(i, :)], policy.pi(next_states[(i, :)])))) for i in range(ns_samples)])
return Q<|docstring|>Returns the state action value, Q(s,a), by performing one step
look-ahead on the domain.
.. note::
For an example of how this function works, see
`Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expectedStep()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
If a policy is passed (used in the policy evaluation), it is used to
generate the action for the next state.
Otherwise the best action is selected.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param a: The given action
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:param policy: (optional) Used to select the action in the next state
(*after* taking action a) when estimating the one_step look-ahead.
If ``policy == None``, the best action will be selected.
:return: The one-step lookahead state-action value, Q(s,a).<|endoftext|>
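Written out, the quantity the two branches above estimate is the standard one-step Bellman backup. With \gamma the discount factor, p_j and r_j the transition probabilities and rewards returned by expectedStep, and s'_i the sampled next states:

Q(s, a) = \sum_j p_j \bigl[ r_j + \gamma\, V(s'_j) \bigr] \;\approx\; \frac{1}{N} \sum_{i=1}^{N} \bigl[ r_i + \gamma\, V(s'_i) \bigr]

where V(\cdot) is replaced by Q(\cdot, \pi(\cdot)) when a policy \pi is supplied (policy evaluation).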
|
365430ab4ccbce11cad1adccd4ffcf3a362c7aa02de4fb01f686e0fdcaa2aabb
|
def Qs_oneStepLookAhead(self, s, ns_samples, policy=None):
'\n Returns an array of actions and their associated values Q(s,a),\n by performing one step look-ahead on the domain for each of them.\n\n .. note::\n For an example of how this function works, see\n `Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n If a policy is passed (used in the policy evaluation), it is used to\n generate the action for the next state.\n Otherwise the best action is selected.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n :param policy: (optional) Used to select the action in the next state\n (*after* taking action a) when estimating the one_step look-ahead.\n If ``policy == None``, the best action will be selected.\n\n :return: an array of length `|A|` containing the *Q(s,a)* for each\n possible *a*, where `|A|` is the number of possible actions from state *s*\n '
actions = self.domain.possibleActions(s)
Qs = np.array([self.Q_oneStepLookAhead(s, a, ns_samples, policy) for a in actions])
return (Qs, actions)
|
Returns an array of actions and their associated values Q(s,a),
by performing one step look-ahead on the domain for each of them.
.. note::
For an example of how this function works, see
`Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expectedStep()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
If a policy is passed (used in the policy evaluation), it is used to
generate the action for the next state.
Otherwise the best action is selected.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:param policy: (optional) Used to select the action in the next state
(*after* taking action a) when estimating the one_step look-ahead.
If ``policy == None``, the best action will be selected.
:return: an array of length `|A|` containing the *Q(s,a)* for each
possible *a*, where `|A|` is the number of possible actions from state *s*
|
rlpy/Representations/Representation.py
|
Qs_oneStepLookAhead
|
okkhoy/rlpy
| 265 |
python
|
def Qs_oneStepLookAhead(self, s, ns_samples, policy=None):
'\n Returns an array of actions and their associated values Q(s,a),\n by performing one step look-ahead on the domain for each of them.\n\n .. note::\n For an example of how this function works, see\n `Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n If a policy is passed (used in the policy evaluation), it is used to\n generate the action for the next state.\n Otherwise the best action is selected.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n :param policy: (optional) Used to select the action in the next state\n (*after* taking action a) when estimating the one_step look-ahead.\n If ``policy == None``, the best action will be selected.\n\n :return: an array of length `|A|` containing the *Q(s,a)* for each\n possible *a*, where `|A|` is the number of possible actions from state *s*\n '
actions = self.domain.possibleActions(s)
Qs = np.array([self.Q_oneStepLookAhead(s, a, ns_samples, policy) for a in actions])
return (Qs, actions)
|
def Qs_oneStepLookAhead(self, s, ns_samples, policy=None):
'\n Returns an array of actions and their associated values Q(s,a),\n by performing one step look-ahead on the domain for each of them.\n\n .. note::\n For an example of how this function works, see\n `Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n If a policy is passed (used in the policy evaluation), it is used to\n generate the action for the next state.\n Otherwise the best action is selected.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n :param policy: (optional) Used to select the action in the next state\n (*after* taking action a) when estimating the one_step look-ahead.\n If ``policy == None``, the best action will be selected.\n\n :return: an array of length `|A|` containing the *Q(s,a)* for each\n possible *a*, where `|A|` is the number of possible actions from state *s*\n '
actions = self.domain.possibleActions(s)
Qs = np.array([self.Q_oneStepLookAhead(s, a, ns_samples, policy) for a in actions])
return (Qs, actions)<|docstring|>Returns an array of actions and their associated values Q(s,a),
by performing one step look-ahead on the domain for each of them.
.. note::
For an example of how this function works, see
`Line 8 of Figure 4.3 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expectedStep()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
If a policy is passed (used in the policy evaluation), it is used to
generate the action for the next state.
Otherwise the best action is selected.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:param policy: (optional) Used to select the action in the next state
(*after* taking action a) when estimating the one_step look-ahead.
If ``policy == None``, the best action will be selected.
:return: an array of length `|A|` containing the *Q(s,a)* for each
possible *a*, where `|A|` is the number of possible actions from state *s*<|endoftext|>
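A hypothetical usage sketch; `representation` and the state `s` are assumed to exist, and the greedy one-step action is simply the argmax over the returned array:

import numpy as np

Qs, actions = representation.Qs_oneStepLookAhead(s, ns_samples=100)
greedy_action = actions[np.argmax(Qs)]  # greedy choice under the one-step estimates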
|
da984124c20392199e74c6e46080f3fe5cff5a9f7d2729851ae62ed9c71d0564
|
def V_oneStepLookAhead(self, s, ns_samples):
'\n Returns the value of being in state *s*, V(s),\n by performing one step look-ahead on the domain.\n\n .. note::\n For an example of how this function works, see\n `Line 6 of Figure 4.5 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n\n :return: The value of being in state *s*, *V(s)*.\n '
(Qs, actions) = self.Qs_oneStepLookAhead(s, ns_samples)
a_ind = np.argmax(Qs)
return (Qs[a_ind], actions[a_ind])
|
Returns the value of being in state *s*, V(s),
by performing one step look-ahead on the domain.
.. note::
For an example of how this function works, see
`Line 6 of Figure 4.5 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expectedStep()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:return: The value of being in state *s*, *V(s)*.
|
rlpy/Representations/Representation.py
|
V_oneStepLookAhead
|
okkhoy/rlpy
| 265 |
python
|
def V_oneStepLookAhead(self, s, ns_samples):
'\n Returns the value of being in state *s*, V(s),\n by performing one step look-ahead on the domain.\n\n .. note::\n For an example of how this function works, see\n `Line 6 of Figure 4.5 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n\n :return: The value of being in state *s*, *V(s)*.\n '
(Qs, actions) = self.Qs_oneStepLookAhead(s, ns_samples)
a_ind = np.argmax(Qs)
return (Qs[a_ind], actions[a_ind])
|
def V_oneStepLookAhead(self, s, ns_samples):
'\n Returns the value of being in state *s*, V(s),\n by performing one step look-ahead on the domain.\n\n .. note::\n For an example of how this function works, see\n `Line 6 of Figure 4.5 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_\n in Sutton and Barto 1998.\n\n If the domain does not define ``expectedStep()``, this function uses\n ``ns_samples`` samples to estimate the one_step look-ahead.\n\n .. note::\n This function should not be called in any RL algorithms unless\n the underlying domain is an approximation of the true model.\n\n :param s: The given state\n :param ns_samples: The number of samples used to estimate the one_step look-ahead.\n\n :return: The value of being in state *s*, *V(s)*.\n '
(Qs, actions) = self.Qs_oneStepLookAhead(s, ns_samples)
a_ind = np.argmax(Qs)
return (Qs[a_ind], actions[a_ind])<|docstring|>Returns the value of being in state *s*, V(s),
by performing one step look-ahead on the domain.
.. note::
For an example of how this function works, see
`Line 6 of Figure 4.5 <http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node43.html>`_
in Sutton and Barto 1998.
If the domain does not define ``expectedStep()``, this function uses
``ns_samples`` samples to estimate the one_step look-ahead.
.. note::
This function should not be called in any RL algorithms unless
the underlying domain is an approximation of the true model.
:param s: The given state
:param ns_samples: The number of samples used to estimate the one_step look-ahead.
:return: The value of being in state *s*, *V(s)*.<|endoftext|>
|
daef4e381011f92eb86fa2c93b9b8b829b2ec76189f8fff2485766c2a6c41a74
|
def stateID2state(self, s_id):
'\n Returns the state vector corresponding to a state_id.\n If dimensions are continuous it returns the state representing the\n middle of the bin (each dimension is discretized according to\n ``representation.discretization``).\n\n :param s_id: The id of the state, often calculated using the\n ``state2bin`` function\n\n :return: The state *s* corresponding to the integer *s_id*.\n '
s = np.array(id2vec(s_id, self.bins_per_dim))
for d in range(self.domain.state_space_dims):
s[d] = bin2state(s[d], self.bins_per_dim[d], self.domain.statespace_limits[(d, :)])
if (len(self.domain.continuous_dims) == 0):
s = s.astype(int)
return s
|
Returns the state vector corresponding to a state_id.
If dimensions are continuous it returns the state representing the
middle of the bin (each dimension is discretized according to
``representation.discretization``).
:param s_id: The id of the state, often calculated using the
``state2bin`` function
:return: The state *s* corresponding to the integer *s_id*.
|
rlpy/Representations/Representation.py
|
stateID2state
|
okkhoy/rlpy
| 265 |
python
|
def stateID2state(self, s_id):
'\n Returns the state vector corresponding to a state_id.\n If dimensions are continuous it returns the state representing the\n middle of the bin (each dimension is discretized according to\n ``representation.discretization``).\n\n :param s_id: The id of the state, often calculated using the\n ``state2bin`` function\n\n :return: The state *s* corresponding to the integer *s_id*.\n '
s = np.array(id2vec(s_id, self.bins_per_dim))
for d in range(self.domain.state_space_dims):
s[d] = bin2state(s[d], self.bins_per_dim[d], self.domain.statespace_limits[(d, :)])
if (len(self.domain.continuous_dims) == 0):
s = s.astype(int)
return s
|
def stateID2state(self, s_id):
'\n Returns the state vector corresponding to a state_id.\n If dimensions are continuous it returns the state representing the\n middle of the bin (each dimension is discretized according to\n ``representation.discretization``).\n\n :param s_id: The id of the state, often calculated using the\n ``state2bin`` function\n\n :return: The state *s* corresponding to the integer *s_id*.\n '
s = np.array(id2vec(s_id, self.bins_per_dim))
for d in range(self.domain.state_space_dims):
s[d] = bin2state(s[d], self.bins_per_dim[d], self.domain.statespace_limits[(d, :)])
if (len(self.domain.continuous_dims) == 0):
s = s.astype(int)
return s<|docstring|>Returns the state vector corresponding to a state_id.
If dimensions are continuous it returns the state representing the
middle of the bin (each dimension is discretized according to
``representation.discretization``).
:param s_id: The id of the state, often calculated using the
``state2bin`` function
:return: The state *s* corresponding to the integer *s_id*.<|endoftext|>
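A self-contained sketch of the decoding performed above, under the assumption that id2vec is a mixed-radix decode over bins_per_dim and that bin2state maps a bin index to the bin's midpoint (the actual rlpy helpers may order the digits differently):

import numpy as np

def id2vec_sketch(s_id, bins_per_dim):
    digits = []
    for b in reversed(bins_per_dim):
        s_id, r = divmod(s_id, b)   # peel off the fastest-varying dimension
        digits.append(r)
    return np.array(digits[::-1])

def bin2state_sketch(bin_idx, n_bins, limits):
    lo, hi = limits
    width = (hi - lo) / n_bins
    return lo + (bin_idx + 0.5) * width  # middle of the bin

print(id2vec_sketch(7, [4, 4]))            # [1 3], since 1*4 + 3 == 7
print(bin2state_sketch(3, 4, (0.0, 1.0)))  # 0.875, center of the last bin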
|
79fdcb93e5b957d3e62236e1451b949140772aa25c9f19c209b02df2b4f730de
|
def stateInTheMiddleOfGrid(self, s):
'\n Accepts a continuous state *s*, bins it into the discretized domain,\n and returns the state of the nearest gridpoint.\n Essentially, we snap *s* to the nearest gridpoint and return that\n gridpoint state.\n For continuous MDPs this plays a major role in improving the speed\n through caching of next samples.\n\n :param s: The given state\n\n :return: The nearest state *s* which is captured by the discretization.\n '
s_normalized = s.copy()
for d in range(self.domain.state_space_dims):
s_normalized[d] = closestDiscretization(s[d], self.bins_per_dim[d], self.domain.statespace_limits[(d, :)])
return s_normalized
|
Accepts a continuous state *s*, bins it into the discretized domain,
and returns the state of the nearest gridpoint.
Essentially, we snap *s* to the nearest gridpoint and return that
gridpoint state.
For continuous MDPs this plays a major role in improving the speed
through caching of next samples.
:param s: The given state
:return: The nearest state *s* which is captured by the discretization.
|
rlpy/Representations/Representation.py
|
stateInTheMiddleOfGrid
|
okkhoy/rlpy
| 265 |
python
|
def stateInTheMiddleOfGrid(self, s):
'\n Accepts a continuous state *s*, bins it into the discretized domain,\n and returns the state of the nearest gridpoint.\n Essentially, we snap *s* to the nearest gridpoint and return that\n gridpoint state.\n For continuous MDPs this plays a major role in improving the speed\n through caching of next samples.\n\n :param s: The given state\n\n :return: The nearest state *s* which is captured by the discretization.\n '
s_normalized = s.copy()
for d in range(self.domain.state_space_dims):
s_normalized[d] = closestDiscretization(s[d], self.bins_per_dim[d], self.domain.statespace_limits[(d, :)])
return s_normalized
|
def stateInTheMiddleOfGrid(self, s):
'\n Accepts a continuous state *s*, bins it into the discretized domain,\n and returns the state of the nearest gridpoint.\n Essentially, we snap *s* to the nearest gridpoint and return that\n gridpoint state.\n For continuous MDPs this plays a major role in improving the speed\n through caching of next samples.\n\n :param s: The given state\n\n :return: The nearest state *s* which is captured by the discretization.\n '
s_normalized = s.copy()
for d in range(self.domain.state_space_dims):
s_normalized[d] = closestDiscretization(s[d], self.bins_per_dim[d], self.domain.statespace_limits[(d, :)])
return s_normalized<|docstring|>Accepts a continuous state *s*, bins it into the discretized domain,
and returns the state of the nearest gridpoint.
Essentially, we snap *s* to the nearest gridpoint and return that
gridpoint state.
For continuous MDPs this plays a major role in improving the speed
through caching of next samples.
:param s: The given state
:return: The nearest state *s* which is captured by the discretization.<|endoftext|>
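For one dimension, the snapping that closestDiscretization is assumed to perform looks like this (illustration only, not the rlpy implementation):

def snap(x, n_bins, lo, hi):
    width = (hi - lo) / n_bins
    i = min(int((x - lo) / width), n_bins - 1)  # bin index, clamped at the top edge
    return lo + (i + 0.5) * width               # bin center

print(snap(0.37, 5, 0.0, 1.0))  # 0.3, the center of bin [0.2, 0.4)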
|
76ade592d34dd7a71fdfcbe9fd1914afc8b8f9c0bf5be7bed2221cd6ba8bcd73
|
def featureLearningRate(self):
'\n :return: An array or scalar used to adapt the learning rate of each\n feature individually.\n '
return 1.0
|
:return: An array or scalar used to adapt the learning rate of each
feature individually.
|
rlpy/Representations/Representation.py
|
featureLearningRate
|
okkhoy/rlpy
| 265 |
python
|
def featureLearningRate(self):
'\n :return: An array or scalar used to adapt the learning rate of each\n feature individually.\n '
return 1.0
|
def featureLearningRate(self):
'\n :return: An array or scalar used to adapt the learning rate of each\n feature individually.\n '
return 1.0<|docstring|>:return: An array or scalar used to adapt the learning rate of each
feature individually.<|endoftext|>
|
5bf30a0fc5daa797742470d031c58193d9d140f5f4b13437e96b5465fc8206cc
|
def logspace_int(limit, num=50):
'\n Returns integers spaced (approximately) evenly on a log scale.\n\n This means the integers are exponentially separated on a linear scale. The\n restriction to integers means that the spacing is not exactly even on a log\n scale. In particular, the smaller integers can grow linearly. This provides\n more coverage at the small scale, which is often desirable when subsampling\n a large array such that you have less coverage at the tail end (and more\n at the beginning).\n\n This function behaves nicer than calling `np.logspace(...).astype(int)`,\n or something similar, as those approaches will contain duplicate integers.\n\n Parameters\n ----------\n limit : int\n The maximum possible integer.\n num : int, optional\n Number of samples to generate. Default is 50.\n\n Returns\n -------\n samples : NumPy array\n The `num` logarithmically spaced integer samples.\n\n References\n ----------\n .. [1] http://stackoverflow.com/a/12421820\n\n '
if (limit <= 0):
raise Exception('`limit` must be greater than zero.')
if (num == 0):
return np.array([], dtype=np.uint64)
elif (num == 1):
return np.array([0], dtype=np.uint64)
if (limit < num):
msg = 'Not enough integers between 0 and {0}'.format(limit)
raise Exception(msg)
result = [1]
if (num > 1):
ratio = ((limit / result[(- 1)]) ** (1 / (num - len(result))))
while (len(result) < num):
next_value = (result[(- 1)] * ratio)
if ((next_value - result[(- 1)]) >= 1):
result.append(next_value)
else:
result.append((result[(- 1)] + 1))
ratio = ((limit / result[(- 1)]) ** (1 / (num - len(result))))
result = (np.round(result) - 1)
return result.astype(np.int64)
|
Returns integers spaced (approximately) evenly on a log scale.
This means the integers are exponentially separated on a linear scale. The
restriction to integers means that the spacing is not exactly even on a log
scale. In particular, the smaller integers can grow linearly. This provides
more coverage at the small scale, which is often desirable when subsampling
a large array such that you have less coverage at the tail end (and more
at the beginning).
This function behaves nicer than calling `np.logspace(...).astype(int)`,
or something similar, as those approaches will contain duplicate integers.
Parameters
----------
limit : int
The maximum possible integer.
num : int, optional
Number of samples to generate. Default is 50.
Returns
-------
samples : NumPy array
The `num` logarithmically spaced integer samples.
References
----------
.. [1] http://stackoverflow.com/a/12421820
|
buhmm/misc.py
|
logspace_int
|
chebee7i/buhmm
| 4 |
python
|
def logspace_int(limit, num=50):
'\n Returns integers spaced (approximately) evenly on a log scale.\n\n This means the integers are exponentially separated on a linear scale. The\n restriction to integers means that the spacing is not exactly even on a log\n scale. In particular, the smaller integers can grow linearly. This provides\n more coverage at the small scale, which is often desirable when subsampling\n a large array such that you have less coverage at the tail end (and more\n at the beginning).\n\n This function behaves nicer than calling `np.logspace(...).astype(int)`,\n or something similar, as those approaches will contain duplicate integers.\n\n Parameters\n ----------\n limit : int\n The maximum possible integer.\n num : int, optional\n Number of samples to generate. Default is 50.\n\n Returns\n -------\n samples : NumPy array\n The `num` logarithmically spaced integer samples.\n\n References\n ----------\n .. [1] http://stackoverflow.com/a/12421820\n\n '
if (limit <= 0):
raise Exception('`limit` must be greater than zero.')
if (num == 0):
return np.array([], dtype=np.uint64)
elif (num == 1):
return np.array([0], dtype=np.uint64)
if (limit < num):
msg = 'Not enough integers between 0 and {0}'.format(limit)
raise Exception(msg)
result = [1]
if (num > 1):
ratio = ((limit / result[(- 1)]) ** (1 / (num - len(result))))
while (len(result) < num):
next_value = (result[(- 1)] * ratio)
if ((next_value - result[(- 1)]) >= 1):
result.append(next_value)
else:
result.append((result[(- 1)] + 1))
ratio = ((limit / result[(- 1)]) ** (1 / (num - len(result))))
result = (np.round(result) - 1)
return result.astype(np.int64)
|
def logspace_int(limit, num=50):
'\n Returns integers spaced (approximately) evenly on a log scale.\n\n This means the integers are exponentially separated on a linear scale. The\n restriction to integers means that the spacing is not exactly even on a log\n scale. In particular, the smaller integers can grow linearly. This provides\n more coverage at the small scale, which is often desirable when subsampling\n a large array such that you have less coverage at the tail end (and more\n at the beginning).\n\n This function behaves nicer than calling `np.logspace(...).astype(int)`,\n or something similar, as those approaches will contain duplicate integers.\n\n Parameters\n ----------\n limit : int\n The maximum possible integer.\n num : int, optional\n Number of samples to generate. Default is 50.\n\n Returns\n -------\n samples : NumPy array\n The `num` logarithmically spaced integer samples.\n\n References\n ----------\n .. [1] http://stackoverflow.com/a/12421820\n\n '
if (limit <= 0):
raise Exception('`limit` must be greater than zero.')
if (num == 0):
return np.array([], dtype=np.uint64)
elif (num == 1):
return np.array([0], dtype=np.uint64)
if (limit < num):
msg = 'Not enough integers between 0 and {0}'.format(limit)
raise Exception(msg)
result = [1]
if (num > 1):
ratio = ((limit / result[(- 1)]) ** (1 / (num - len(result))))
while (len(result) < num):
next_value = (result[(- 1)] * ratio)
if ((next_value - result[(- 1)]) >= 1):
result.append(next_value)
else:
result.append((result[(- 1)] + 1))
ratio = ((limit / result[(- 1)]) ** (1 / (num - len(result))))
result = (np.round(result) - 1)
return result.astype(np.int64)<|docstring|>Returns integers spaced (approximately) evenly on a log scale.
This means the integers are exponentially separated on a linear scale. The
restriction to integers means that the spacing is not exactly even on a log
scale. In particular, the smaller integers can grow linearly. This provides
more coverage at the small scale, which is often desirable when subsampling
a large array such that you have less coverage at the tail end (and more
at the beginning).
This function behaves nicer than calling `np.logspace(...).astype(int)`,
or something similar, as those approaches will contain duplicate integers.
Parameters
----------
limit : int
The maximum possible integer.
num : int, optional
Number of samples to generate. Default is 50.
Returns
-------
samples : NumPy array
The `num` logarithmically spaced integer samples.
References
----------
.. [1] http://stackoverflow.com/a/12421820<|endoftext|>
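Typical use is subsampling a long array so that early indices are covered densely; `data` here is an assumed placeholder:

import numpy as np

data = np.arange(100_000)              # some long array to subsample
idx = logspace_int(len(data), num=50)  # 50 distinct indices in [0, len(data) - 1]
subsample = data[idx]                  # denser coverage at the beginning of the array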
|
69f15870710af9abac7c51888658a4eca1e960b8aba203818e7fde85357b84b1
|
def getheaders(self):
'Returns a dictionary of the response headers.'
return self.urllib3_response.getheaders()
|
Returns a dictionary of the response headers.
|
src/deutschland/zoll/rest.py
|
getheaders
|
t-huyeng/deutschland
| 445 |
python
|
def getheaders(self):
return self.urllib3_response.getheaders()
|
def getheaders(self):
return self.urllib3_response.getheaders()<|docstring|>Returns a dictionary of the response headers.<|endoftext|>
|
2716a1f3904f9b40f2821db47cfb8b28e7a243ae1a4b2f5ca91983af0f0f01fd
|
def getheader(self, name, default=None):
'Returns a given response header.'
return self.urllib3_response.getheader(name, default)
|
Returns a given response header.
|
src/deutschland/zoll/rest.py
|
getheader
|
t-huyeng/deutschland
| 445 |
python
|
def getheader(self, name, default=None):
return self.urllib3_response.getheader(name, default)
|
def getheader(self, name, default=None):
return self.urllib3_response.getheader(name, default)<|docstring|>Returns a given response header.<|endoftext|>
|
b3ee1625909ac5213d92466212649736ecefe59cf05a346ffc3685c3ba5a0fd5
|
def request(self, method, url, query_params=None, headers=None, body=None, post_params=None, _preload_content=True, _request_timeout=None):
'Perform requests.\n\n :param method: http request method\n :param url: http request url\n :param query_params: query parameters in the url\n :param headers: http request headers\n :param body: request json body, for `application/json`\n :param post_params: request post parameters,\n `application/x-www-form-urlencoded`\n and `multipart/form-data`\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number is provided, it will be the total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n '
method = method.upper()
assert (method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS'])
if (post_params and body):
raise ApiValueError('body parameter cannot be used with post_params parameter.')
post_params = (post_params or {})
headers = (headers or {})
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)):
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and (len(_request_timeout) == 2)):
timeout = urllib3.Timeout(connect=_request_timeout[0], read=_request_timeout[1])
try:
if (method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']):
if ((method != 'DELETE') and ('Content-Type' not in headers)):
headers['Content-Type'] = 'application/json'
if query_params:
url += ('?' + urlencode(query_params))
if (('Content-Type' not in headers) or re.search('json', headers['Content-Type'], re.IGNORECASE)):
request_body = None
if (body is not None):
request_body = json.dumps(body)
r = self.pool_manager.request(method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (headers['Content-Type'] == 'application/x-www-form-urlencoded'):
r = self.pool_manager.request(method, url, fields=post_params, encode_multipart=False, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (headers['Content-Type'] == 'multipart/form-data'):
del headers['Content-Type']
r = self.pool_manager.request(method, url, fields=post_params, encode_multipart=True, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (isinstance(body, str) or isinstance(body, bytes)):
request_body = body
r = self.pool_manager.request(method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers)
else:
msg = 'Cannot prepare a request message for provided\n arguments. Please check that your arguments match\n declared content type.'
raise ApiException(status=0, reason=msg)
else:
r = self.pool_manager.request(method, url, fields=query_params, preload_content=_preload_content, timeout=timeout, headers=headers)
except urllib3.exceptions.SSLError as e:
msg = '{0}\n{1}'.format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
logger.debug('response body: %s', r.data)
if (not (200 <= r.status <= 299)):
if (r.status == 401):
raise UnauthorizedException(http_resp=r)
if (r.status == 403):
raise ForbiddenException(http_resp=r)
if (r.status == 404):
raise NotFoundException(http_resp=r)
if (500 <= r.status <= 599):
raise ServiceException(http_resp=r)
raise ApiException(http_resp=r)
return r
|
Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
|
src/deutschland/zoll/rest.py
|
request
|
t-huyeng/deutschland
| 445 |
python
|
def request(self, method, url, query_params=None, headers=None, body=None, post_params=None, _preload_content=True, _request_timeout=None):
'Perform requests.\n\n :param method: http request method\n :param url: http request url\n :param query_params: query parameters in the url\n :param headers: http request headers\n :param body: request json body, for `application/json`\n :param post_params: request post parameters,\n `application/x-www-form-urlencoded`\n and `multipart/form-data`\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number is provided, it will be the total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n '
method = method.upper()
assert (method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS'])
if (post_params and body):
raise ApiValueError('body parameter cannot be used with post_params parameter.')
post_params = (post_params or {})
headers = (headers or {})
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)):
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and (len(_request_timeout) == 2)):
timeout = urllib3.Timeout(connect=_request_timeout[0], read=_request_timeout[1])
try:
if (method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']):
if ((method != 'DELETE') and ('Content-Type' not in headers)):
headers['Content-Type'] = 'application/json'
if query_params:
url += ('?' + urlencode(query_params))
if (('Content-Type' not in headers) or re.search('json', headers['Content-Type'], re.IGNORECASE)):
request_body = None
if (body is not None):
request_body = json.dumps(body)
r = self.pool_manager.request(method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (headers['Content-Type'] == 'application/x-www-form-urlencoded'):
r = self.pool_manager.request(method, url, fields=post_params, encode_multipart=False, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (headers['Content-Type'] == 'multipart/form-data'):
del headers['Content-Type']
r = self.pool_manager.request(method, url, fields=post_params, encode_multipart=True, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (isinstance(body, str) or isinstance(body, bytes)):
request_body = body
r = self.pool_manager.request(method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers)
else:
msg = 'Cannot prepare a request message for provided\n arguments. Please check that your arguments match\n declared content type.'
raise ApiException(status=0, reason=msg)
else:
r = self.pool_manager.request(method, url, fields=query_params, preload_content=_preload_content, timeout=timeout, headers=headers)
except urllib3.exceptions.SSLError as e:
msg = '{0}\n{1}'.format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
logger.debug('response body: %s', r.data)
if (not (200 <= r.status <= 299)):
if (r.status == 401):
raise UnauthorizedException(http_resp=r)
if (r.status == 403):
raise ForbiddenException(http_resp=r)
if (r.status == 404):
raise NotFoundException(http_resp=r)
if (500 <= r.status <= 599):
raise ServiceException(http_resp=r)
raise ApiException(http_resp=r)
return r
|
def request(self, method, url, query_params=None, headers=None, body=None, post_params=None, _preload_content=True, _request_timeout=None):
'Perform requests.\n\n :param method: http request method\n :param url: http request url\n :param query_params: query parameters in the url\n :param headers: http request headers\n :param body: request json body, for `application/json`\n :param post_params: request post parameters,\n `application/x-www-form-urlencoded`\n and `multipart/form-data`\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number is provided, it will be the total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n '
method = method.upper()
assert (method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS'])
if (post_params and body):
raise ApiValueError('body parameter cannot be used with post_params parameter.')
post_params = (post_params or {})
headers = (headers or {})
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)):
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and (len(_request_timeout) == 2)):
timeout = urllib3.Timeout(connect=_request_timeout[0], read=_request_timeout[1])
try:
if (method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']):
if ((method != 'DELETE') and ('Content-Type' not in headers)):
headers['Content-Type'] = 'application/json'
if query_params:
url += ('?' + urlencode(query_params))
if (('Content-Type' not in headers) or re.search('json', headers['Content-Type'], re.IGNORECASE)):
request_body = None
if (body is not None):
request_body = json.dumps(body)
r = self.pool_manager.request(method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (headers['Content-Type'] == 'application/x-www-form-urlencoded'):
r = self.pool_manager.request(method, url, fields=post_params, encode_multipart=False, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (headers['Content-Type'] == 'multipart/form-data'):
del headers['Content-Type']
r = self.pool_manager.request(method, url, fields=post_params, encode_multipart=True, preload_content=_preload_content, timeout=timeout, headers=headers)
elif (isinstance(body, str) or isinstance(body, bytes)):
request_body = body
r = self.pool_manager.request(method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers)
else:
msg = 'Cannot prepare a request message for provided\n arguments. Please check that your arguments match\n declared content type.'
raise ApiException(status=0, reason=msg)
else:
r = self.pool_manager.request(method, url, fields=query_params, preload_content=_preload_content, timeout=timeout, headers=headers)
except urllib3.exceptions.SSLError as e:
msg = '{0}\n{1}'.format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
logger.debug('response body: %s', r.data)
if (not (200 <= r.status <= 299)):
if (r.status == 401):
raise UnauthorizedException(http_resp=r)
if (r.status == 403):
raise ForbiddenException(http_resp=r)
if (r.status == 404):
raise NotFoundException(http_resp=r)
if (500 <= r.status <= 599):
raise ServiceException(http_resp=r)
raise ApiException(http_resp=r)
return r<|docstring|>Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.<|endoftext|>
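A hypothetical call (`client` is assumed to be an instance of the REST client class that owns this method); with the default _preload_content=True the return value is the RESTResponse wrapper shown earlier:

resp = client.request(
    "GET",
    "https://example.org/api/items",
    query_params={"page": 1},
    _request_timeout=(3.05, 10),  # (connect, read) timeouts
)
print(resp.status, resp.getheader("Content-Type"))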
|
da66e060640ba8fb52566d63c658f73a572284dbbbd688b916195de1ecd61ec2
|
def add(self, *args):
'Add a new object to this container.\n\n Generally this method should only be used during data loading, since\n adding data during a test can affect the results of other tests.\n '
for obj in args:
if (obj not in self._objects):
self._objects.append(obj)
|
Add a new object to this container.
Generally this method should only be used during data loading, since
adding data during a test can affect the results of other tests.
|
openstack_dashboard/test/test_data/utils.py
|
add
|
rishavtandon93/horizon
| 930 |
python
|
def add(self, *args):
'Add a new object to this container.\n\n Generally this method should only be used during data loading, since\n adding data during a test can affect the results of other tests.\n '
for obj in args:
if (obj not in self._objects):
self._objects.append(obj)
|
def add(self, *args):
'Add a new object to this container.\n\n Generally this method should only be used during data loading, since\n adding data during a test can affect the results of other tests.\n '
for obj in args:
if (obj not in self._objects):
self._objects.append(obj)<|docstring|>Add a new object to this container.
Generally this method should only be used during data loading, since
adding data during a test can affect the results of other tests.<|endoftext|>
|
d870d1fd39ddeedd9b25d2ca843aa4dbc0e39a3345cf1c652d0d094c2148e89c
|
def list(self):
'Returns a list of all objects in this container.'
return self._objects
|
Returns a list of all objects in this container.
|
openstack_dashboard/test/test_data/utils.py
|
list
|
rishavtandon93/horizon
| 930 |
python
|
def list(self):
return self._objects
|
def list(self):
return self._objects<|docstring|>Returns a list of all objects in this container.<|endoftext|>
|
766ce754eb21f5566d9e0f274d2e301184f12309a50c05d90abf921c472c9824
|
def filter(self, filtered=None, **kwargs):
'Returns objects whose attributes match the given kwargs.'
if (filtered is None):
filtered = self._objects
try:
(key, value) = kwargs.popitem()
except KeyError:
return filtered
def get_match(obj):
return (hasattr(obj, key) and (getattr(obj, key) == value))
filtered = [obj for obj in filtered if get_match(obj)]
return self.filter(filtered=filtered, **kwargs)
|
Returns objects whose attributes match the given kwargs.
|
openstack_dashboard/test/test_data/utils.py
|
filter
|
rishavtandon93/horizon
| 930 |
python
|
def filter(self, filtered=None, **kwargs):
if (filtered is None):
filtered = self._objects
try:
(key, value) = kwargs.popitem()
except KeyError:
return filtered
def get_match(obj):
return (hasattr(obj, key) and (getattr(obj, key) == value))
filtered = [obj for obj in filtered if get_match(obj)]
return self.filter(filtered=filtered, **kwargs)
|
def filter(self, filtered=None, **kwargs):
if (filtered is None):
filtered = self._objects
try:
(key, value) = kwargs.popitem()
except KeyError:
return filtered
def get_match(obj):
return (hasattr(obj, key) and (getattr(obj, key) == value))
filtered = [obj for obj in filtered if get_match(obj)]
return self.filter(filtered=filtered, **kwargs)<|docstring|>Returns objects whose attributes match the given kwargs.<|endoftext|>
|
1563f8375ac9d9b734092966dc8a0cfc999bcc604f44c47ea706cb3bc9b7bbd1
|
def get(self, **kwargs):
"Returns a single object whose attributes match the given kwargs.\n\n An error will be raised if the arguments\n provided don't return exactly one match.\n "
matches = self.filter(**kwargs)
if (not matches):
raise Exception('No matches found.')
elif (len(matches) > 1):
raise Exception('Multiple matches found.')
else:
return matches.pop()
|
Returns a single object whose attributes match the given kwargs.
An error will be raised if the arguments
provided don't return exactly one match.
|
openstack_dashboard/test/test_data/utils.py
|
get
|
rishavtandon93/horizon
| 930 |
python
|
def get(self, **kwargs):
"Returns a single object whose attributes match the given kwargs.\n\n An error will be raised if the arguments\n provided don't return exactly one match.\n "
matches = self.filter(**kwargs)
if (not matches):
raise Exception('No matches found.')
elif (len(matches) > 1):
raise Exception('Multiple matches found.')
else:
return matches.pop()
|
def get(self, **kwargs):
"Returns a single object whose attributes match the given kwargs.\n\n An error will be raised if the arguments\n provided don't return exactly one match.\n "
matches = self.filter(**kwargs)
if (not matches):
raise Exception('No matches found.')
elif (len(matches) > 1):
raise Exception('Multiple matches found.')
else:
return matches.pop()<|docstring|>Returns a single object whose attributes match the given kwargs.
An error will be raised if the arguments
provided don't return exactly one match.<|endoftext|>
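A small illustration of the container API above; the class name TestDataContainer and the throwaway Obj type are assumptions made for the example:

class Obj:
    def __init__(self, name, size):
        self.name, self.size = name, size

c = TestDataContainer()
c.add(Obj("a", 1), Obj("b", 2), Obj("b", 3))
c.filter(name="b", size=2)  # -> [the single Obj("b", 2) instance]
c.get(name="a")             # exactly one match, otherwise an exception is raised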
|
86734aa8ecf2ec0d88613dd8d60bfd04fd09cd89a78e27bff05ce58c41c9a541
|
def first(self):
'Returns the first object from this container.'
return self._objects[0]
|
Returns the first object from this container.
|
openstack_dashboard/test/test_data/utils.py
|
first
|
rishavtandon93/horizon
| 930 |
python
|
def first(self):
return self._objects[0]
|
def first(self):
return self._objects[0]<|docstring|>Returns the first object from this container.<|endoftext|>
|
d5a75a7344f556f35505b937af1fdf90da2841319a1b8a460183024a6d48e8d4
|
def sumofsq(x, axis=0):
'Helper function to calculate sum of squares along the given axis (first axis by default)'
return np.sum((x ** 2), axis=axis)
|
Helper function to calculate sum of squares along the given axis (first axis by default)
|
statsmodels/tsa/ar_model.py
|
sumofsq
|
raamana/statsmodels
| 6 |
python
|
def sumofsq(x, axis=0):
return np.sum((x ** 2), axis=axis)
|
def sumofsq(x, axis=0):
return np.sum((x ** 2), axis=axis)<|docstring|>Helper function to calculate sum of squares along the given axis (first axis by default)<|endoftext|>
|
06765e34a9829e81401ed7c4a2164718b9c1ead7ad11559e47d59fe0cce4f942
|
def initialize(self):
'Initialization of the model (no-op).'
pass
|
Initialization of the model (no-op).
|
statsmodels/tsa/ar_model.py
|
initialize
|
raamana/statsmodels
| 6 |
python
|
def initialize(self):
pass
|
def initialize(self):
pass<|docstring|>Initialization of the model (no-op).<|endoftext|>
|
8dac3f32bed1e7266a8afb64b2fb1b3295b9692f4596545ce379a038fb6f0d9f
|
def _transparams(self, params):
'\n Transforms params to induce stationarity/invertibility.\n\n Reference\n ---------\n Jones(1980)\n '
p = self.k_ar
k = self.k_trend
newparams = params.copy()
newparams[k:(k + p)] = _ar_transparams(params[k:(k + p)].copy())
return newparams
|
Transforms params to induce stationarity/invertibility.
Reference
---------
Jones(1980)
|
statsmodels/tsa/ar_model.py
|
_transparams
|
raamana/statsmodels
| 6 |
python
|
def _transparams(self, params):
'\n Transforms params to induce stationarity/invertibility.\n\n Reference\n ---------\n Jones(1980)\n '
p = self.k_ar
k = self.k_trend
newparams = params.copy()
newparams[k:(k + p)] = _ar_transparams(params[k:(k + p)].copy())
return newparams
|
def _transparams(self, params):
'\n Transforms params to induce stationarity/invertibility.\n\n Reference\n ---------\n Jones(1980)\n '
p = self.k_ar
k = self.k_trend
newparams = params.copy()
newparams[k:(k + p)] = _ar_transparams(params[k:(k + p)].copy())
return newparams<|docstring|>Transforms params to induce stationarity/invertibility.
Reference
---------
Jones(1980)<|endoftext|>
|
8a6a26222ebf17109652e21220d7f63c09e6881ccf838264f03ab374674e900b
|
def _invtransparams(self, start_params):
'\n Inverse of the Jones reparameterization\n '
p = self.k_ar
k = self.k_trend
newparams = start_params.copy()
newparams[k:(k + p)] = _ar_invtransparams(start_params[k:(k + p)].copy())
return newparams
|
Inverse of the Jones reparameterization
|
statsmodels/tsa/ar_model.py
|
_invtransparams
|
raamana/statsmodels
| 6 |
python
|
def _invtransparams(self, start_params):
'\n \n '
p = self.k_ar
k = self.k_trend
newparams = start_params.copy()
newparams[k:(k + p)] = _ar_invtransparams(start_params[k:(k + p)].copy())
return newparams
|
def _invtransparams(self, start_params):
'\n \n '
p = self.k_ar
k = self.k_trend
newparams = start_params.copy()
newparams[k:(k + p)] = _ar_invtransparams(start_params[k:(k + p)].copy())
return newparams<|docstring|>Inverse of the Jones reparameterization<|endoftext|>
|
4eaecfc5856c15f5698e80749ed1ce779c6672e70fdc2debcc266071434acdf4
|
def _presample_fit(self, params, start, p, end, y, predictedvalues):
'\n Return the pre-sample predicted values using the Kalman Filter\n\n Notes\n -----\n See predict method for how to use start and p.\n '
k = self.k_trend
T_mat = KalmanFilter.T(params, p, k, p)
R_mat = KalmanFilter.R(params, p, k, 0, p)
alpha = np.zeros((p, 1))
Q_0 = np.dot(inv((np.identity((p ** 2)) - np.kron(T_mat, T_mat))), np.dot(R_mat, R_mat.T).ravel('F'))
Q_0 = Q_0.reshape(p, p, order='F')
P = Q_0
Z_mat = KalmanFilter.Z(p)
for i in range(end):
v_mat = (y[i] - np.dot(Z_mat, alpha))
F_mat = np.dot(np.dot(Z_mat, P), Z_mat.T)
Finv = (1.0 / F_mat)
K = np.dot(np.dot(np.dot(T_mat, P), Z_mat.T), Finv)
alpha = (np.dot(T_mat, alpha) + np.dot(K, v_mat))
L = (T_mat - np.dot(K, Z_mat))
P = (np.dot(np.dot(T_mat, P), L.T) + np.dot(R_mat, R_mat.T))
if (i >= (start - 1)):
predictedvalues[((i + 1) - start)] = np.dot(Z_mat, alpha)
|
Return the pre-sample predicted values using the Kalman Filter
Notes
-----
See predict method for how to use start and p.
|
statsmodels/tsa/ar_model.py
|
_presample_fit
|
raamana/statsmodels
| 6 |
python
|
def _presample_fit(self, params, start, p, end, y, predictedvalues):
'\n Return the pre-sample predicted values using the Kalman Filter\n\n Notes\n -----\n See predict method for how to use start and p.\n '
k = self.k_trend
T_mat = KalmanFilter.T(params, p, k, p)
R_mat = KalmanFilter.R(params, p, k, 0, p)
alpha = np.zeros((p, 1))
Q_0 = np.dot(inv((np.identity((p ** 2)) - np.kron(T_mat, T_mat))), np.dot(R_mat, R_mat.T).ravel('F'))
Q_0 = Q_0.reshape(p, p, order='F')
P = Q_0
Z_mat = KalmanFilter.Z(p)
for i in range(end):
v_mat = (y[i] - np.dot(Z_mat, alpha))
F_mat = np.dot(np.dot(Z_mat, P), Z_mat.T)
Finv = (1.0 / F_mat)
K = np.dot(np.dot(np.dot(T_mat, P), Z_mat.T), Finv)
alpha = (np.dot(T_mat, alpha) + np.dot(K, v_mat))
L = (T_mat - np.dot(K, Z_mat))
P = (np.dot(np.dot(T_mat, P), L.T) + np.dot(R_mat, R_mat.T))
if (i >= (start - 1)):
predictedvalues[((i + 1) - start)] = np.dot(Z_mat, alpha)
|
def _presample_fit(self, params, start, p, end, y, predictedvalues):
'\n Return the pre-sample predicted values using the Kalman Filter\n\n Notes\n -----\n See predict method for how to use start and p.\n '
k = self.k_trend
T_mat = KalmanFilter.T(params, p, k, p)
R_mat = KalmanFilter.R(params, p, k, 0, p)
alpha = np.zeros((p, 1))
Q_0 = np.dot(inv((np.identity((p ** 2)) - np.kron(T_mat, T_mat))), np.dot(R_mat, R_mat.T).ravel('F'))
Q_0 = Q_0.reshape(p, p, order='F')
P = Q_0
Z_mat = KalmanFilter.Z(p)
for i in range(end):
v_mat = (y[i] - np.dot(Z_mat, alpha))
F_mat = np.dot(np.dot(Z_mat, P), Z_mat.T)
Finv = (1.0 / F_mat)
K = np.dot(np.dot(np.dot(T_mat, P), Z_mat.T), Finv)
alpha = (np.dot(T_mat, alpha) + np.dot(K, v_mat))
L = (T_mat - np.dot(K, Z_mat))
P = (np.dot(np.dot(T_mat, P), L.T) + np.dot(R_mat, R_mat.T))
if (i >= (start - 1)):
predictedvalues[((i + 1) - start)] = np.dot(Z_mat, alpha)<|docstring|>Return the pre-sample predicted values using the Kalman Filter
Notes
-----
See predict method for how to use start and p.<|endoftext|>
|
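To make the recursion concrete, here is a minimal scalar version of the filtering loop above for p = 1 (so Z = 1, T = phi, R = 1); the series and coefficient are illustrative:

import numpy as np

phi = 0.7
y = np.random.randn(25)
alpha = 0.0
P = 1.0 / (1.0 - phi ** 2)        # stationary initialization (Q_0 for p = 1)
pred = np.empty_like(y)
for i, yi in enumerate(y):
    pred[i] = alpha               # one-step-ahead prediction, Z * alpha
    v = yi - alpha                # innovation
    F = P                         # innovation variance, Z * P * Z'
    K = phi * P / F               # gain, T * P * Z' / F
    alpha = phi * alpha + K * v   # state update
    L = phi - K                   # T - K * Z
    P = phi * P * L + 1.0         # T * P * L' + R * R'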
0f004a6e6fb6cecd395d8b2285fca3a6eda3923a894b796147663ad4d51bc71e
|
def predict(self, params, start=None, end=None, dynamic=False):
'\n Construct in-sample and out-of-sample prediction.\n\n Parameters\n ----------\n params : array\n The fitted model parameters.\n start : int, str, or datetime\n Zero-indexed observation number at which to start forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type.\n end : int, str, or datetime\n Zero-indexed observation number at which to end forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type.\n dynamic : bool\n The `dynamic` keyword affects in-sample prediction. If dynamic\n is False, then the in-sample lagged values are used for\n prediction. If `dynamic` is True, then in-sample forecasts are\n used in place of lagged dependent variables. The first forecasted\n value is `start`.\n\n Returns\n -------\n array_like\n An array containing the predicted values.\n\n Notes\n -----\n The linear Gaussian Kalman filter is used to return pre-sample fitted\n values. The exact initial Kalman Filter is used. See Durbin and Koopman\n in the references for more information.\n '
if (not (hasattr(self, 'k_ar') and hasattr(self, 'k_trend'))):
raise RuntimeError('Model must be fit before calling predict')
(start, end, out_of_sample, _) = self._get_prediction_index(start, end, dynamic)
k_ar = self.k_ar
k_trend = self.k_trend
method = self.method
endog = self.endog.squeeze()
if dynamic:
out_of_sample += ((end - start) + 1)
return _ar_predict_out_of_sample(endog, params, k_ar, k_trend, out_of_sample, start)
predictedvalues = np.zeros(((end + 1) - start))
if (method == 'mle'):
if k_trend:
mu = (params[0] / (1 - np.sum(params[k_trend:])))
else:
mu = 0
if (start < k_ar):
self._presample_fit(params, start, k_ar, min((k_ar - 1), end), (endog[:k_ar] - mu), predictedvalues)
predictedvalues[:(k_ar - start)] += mu
if (end < k_ar):
return predictedvalues
fittedvalues = np.dot(self.X, params)
pv_start = max((k_ar - start), 0)
fv_start = max((start - k_ar), 0)
fv_end = min(len(fittedvalues), ((end - k_ar) + 1))
predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
if out_of_sample:
forecastvalues = _ar_predict_out_of_sample(endog, params, k_ar, k_trend, out_of_sample)
predictedvalues = np.r_[(predictedvalues, forecastvalues)]
return predictedvalues
|
Construct in-sample and out-of-sample prediction.
Parameters
----------
params : array
The fitted model parameters.
start : int, str, or datetime
    Zero-indexed observation number at which to start forecasting, i.e.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
    Zero-indexed observation number at which to end forecasting, i.e.,
    the last forecast is end. Can also be a date string to
parse or a datetime type.
dynamic : bool
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
Returns
-------
array_like
An array containing the predicted values.
Notes
-----
The linear Gaussian Kalman filter is used to return pre-sample fitted
values. The exact initial Kalman Filter is used. See Durbin and Koopman
in the references for more information.
|
statsmodels/tsa/ar_model.py
|
predict
|
raamana/statsmodels
| 6 |
python
|
def predict(self, params, start=None, end=None, dynamic=False):
'\n Construct in-sample and out-of-sample prediction.\n\n Parameters\n ----------\n params : array\n The fitted model parameters.\n start : int, str, or datetime\n Zero-indexed observation number at which to start forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type.\n end : int, str, or datetime\n Zero-indexed observation number at which to end forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type.\n dynamic : bool\n The `dynamic` keyword affects in-sample prediction. If dynamic\n is False, then the in-sample lagged values are used for\n prediction. If `dynamic` is True, then in-sample forecasts are\n used in place of lagged dependent variables. The first forecasted\n value is `start`.\n\n Returns\n -------\n array_like\n An array containing the predicted values.\n\n Notes\n -----\n The linear Gaussian Kalman filter is used to return pre-sample fitted\n values. The exact initial Kalman Filter is used. See Durbin and Koopman\n in the references for more information.\n '
if (not (hasattr(self, 'k_ar') and hasattr(self, 'k_trend'))):
raise RuntimeError('Model must be fit before calling predict')
(start, end, out_of_sample, _) = self._get_prediction_index(start, end, dynamic)
k_ar = self.k_ar
k_trend = self.k_trend
method = self.method
endog = self.endog.squeeze()
if dynamic:
out_of_sample += ((end - start) + 1)
return _ar_predict_out_of_sample(endog, params, k_ar, k_trend, out_of_sample, start)
predictedvalues = np.zeros(((end + 1) - start))
if (method == 'mle'):
if k_trend:
mu = (params[0] / (1 - np.sum(params[k_trend:])))
else:
mu = 0
if (start < k_ar):
self._presample_fit(params, start, k_ar, min((k_ar - 1), end), (endog[:k_ar] - mu), predictedvalues)
predictedvalues[:(k_ar - start)] += mu
if (end < k_ar):
return predictedvalues
fittedvalues = np.dot(self.X, params)
pv_start = max((k_ar - start), 0)
fv_start = max((start - k_ar), 0)
fv_end = min(len(fittedvalues), ((end - k_ar) + 1))
predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
if out_of_sample:
forecastvalues = _ar_predict_out_of_sample(endog, params, k_ar, k_trend, out_of_sample)
predictedvalues = np.r_[(predictedvalues, forecastvalues)]
return predictedvalues
|
def predict(self, params, start=None, end=None, dynamic=False):
'\n Construct in-sample and out-of-sample prediction.\n\n Parameters\n ----------\n params : array\n The fitted model parameters.\n start : int, str, or datetime\n Zero-indexed observation number at which to start forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type.\n end : int, str, or datetime\n Zero-indexed observation number at which to end forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type.\n dynamic : bool\n The `dynamic` keyword affects in-sample prediction. If dynamic\n is False, then the in-sample lagged values are used for\n prediction. If `dynamic` is True, then in-sample forecasts are\n used in place of lagged dependent variables. The first forecasted\n value is `start`.\n\n Returns\n -------\n array_like\n An array containing the predicted values.\n\n Notes\n -----\n The linear Gaussian Kalman filter is used to return pre-sample fitted\n values. The exact initial Kalman Filter is used. See Durbin and Koopman\n in the references for more information.\n '
if (not (hasattr(self, 'k_ar') and hasattr(self, 'k_trend'))):
raise RuntimeError('Model must be fit before calling predict')
(start, end, out_of_sample, _) = self._get_prediction_index(start, end, dynamic)
k_ar = self.k_ar
k_trend = self.k_trend
method = self.method
endog = self.endog.squeeze()
if dynamic:
out_of_sample += ((end - start) + 1)
return _ar_predict_out_of_sample(endog, params, k_ar, k_trend, out_of_sample, start)
predictedvalues = np.zeros(((end + 1) - start))
if (method == 'mle'):
if k_trend:
mu = (params[0] / (1 - np.sum(params[k_trend:])))
else:
mu = 0
if (start < k_ar):
self._presample_fit(params, start, k_ar, min((k_ar - 1), end), (endog[:k_ar] - mu), predictedvalues)
predictedvalues[:(k_ar - start)] += mu
if (end < k_ar):
return predictedvalues
fittedvalues = np.dot(self.X, params)
pv_start = max((k_ar - start), 0)
fv_start = max((start - k_ar), 0)
fv_end = min(len(fittedvalues), ((end - k_ar) + 1))
predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
if out_of_sample:
forecastvalues = _ar_predict_out_of_sample(endog, params, k_ar, k_trend, out_of_sample)
predictedvalues = np.r_[(predictedvalues, forecastvalues)]
return predictedvalues<|docstring|>Construct in-sample and out-of-sample prediction.
Parameters
----------
params : array
The fitted model parameters.
start : int, str, or datetime
    Zero-indexed observation number at which to start forecasting, i.e.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
    Zero-indexed observation number at which to end forecasting, i.e.,
    the last forecast is end. Can also be a date string to
parse or a datetime type.
dynamic : bool
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
Returns
-------
array_like
An array containing the predicted values.
Notes
-----
The linear Gaussian Kalman filter is used to return pre-sample fitted
values. The exact initial Kalman Filter is used. See Durbin and Koopman
in the references for more information.<|endoftext|>
|
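A minimal end-to-end sketch of fitting and predicting; the simulated series and lag choice are arbitrary, and `AR` is the classic class from `statsmodels.tsa.ar_model` (deprecated in newer releases):

import numpy as np
from statsmodels.tsa.ar_model import AR

np.random.seed(0)
y = np.zeros(200)
for t in range(1, 200):                  # simulate an AR(1) with phi = 0.6
    y[t] = 0.6 * y[t - 1] + np.random.randn()

res = AR(y).fit(maxlag=2, method='mle', disp=0)
in_sample = res.predict(start=0, end=199)    # pre-sample values come from the Kalman filter
forecast = res.predict(start=200, end=209)   # pure out-of-sample forecasts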
eae0fe8f71bd0993182a4d9679a6c8c4887d49bbacf054365c160868297177c5
|
def _presample_varcov(self, params):
'\n Returns the inverse of the presample variance-covariance.\n\n Notes\n -----\n See Hamilton p. 125\n '
k = self.k_trend
p = self.k_ar
params0 = np.r_[((- 1), params[k:])]
Vpinv = np.zeros((p, p), dtype=params.dtype)
for i in range(1, (p + 1)):
        Vpinv[(i - 1), (i - 1):] = np.correlate(params0, params0[:i])[:(- 1)]
        Vpinv[(i - 1), (i - 1):] -= np.correlate(params0[(- i):], params0)[:(- 1)]
Vpinv = ((Vpinv + Vpinv.T) - np.diag(Vpinv.diagonal()))
return Vpinv
|
Returns the inverse of the presample variance-covariance.
Notes
-----
See Hamilton p. 125
|
statsmodels/tsa/ar_model.py
|
_presample_varcov
|
raamana/statsmodels
| 6 |
python
|
def _presample_varcov(self, params):
'\n Returns the inverse of the presample variance-covariance.\n\n Notes\n -----\n See Hamilton p. 125\n '
k = self.k_trend
p = self.k_ar
params0 = np.r_[((- 1), params[k:])]
Vpinv = np.zeros((p, p), dtype=params.dtype)
for i in range(1, (p + 1)):
        Vpinv[(i - 1), (i - 1):] = np.correlate(params0, params0[:i])[:(- 1)]
        Vpinv[(i - 1), (i - 1):] -= np.correlate(params0[(- i):], params0)[:(- 1)]
Vpinv = ((Vpinv + Vpinv.T) - np.diag(Vpinv.diagonal()))
return Vpinv
|
def _presample_varcov(self, params):
'\n Returns the inverse of the presample variance-covariance.\n\n Notes\n -----\n See Hamilton p. 125\n '
k = self.k_trend
p = self.k_ar
params0 = np.r_[((- 1), params[k:])]
Vpinv = np.zeros((p, p), dtype=params.dtype)
for i in range(1, (p + 1)):
        Vpinv[(i - 1), (i - 1):] = np.correlate(params0, params0[:i])[:(- 1)]
        Vpinv[(i - 1), (i - 1):] -= np.correlate(params0[(- i):], params0)[:(- 1)]
Vpinv = ((Vpinv + Vpinv.T) - np.diag(Vpinv.diagonal()))
return Vpinv<|docstring|>Returns the inverse of the presample variance-covariance.
Notes
-----
See Hamilton p. 125<|endoftext|>
|
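A sanity check for the p = 1 case: Hamilton's presample variance for an AR(1) with coefficient phi is 1/(1 - phi**2) once sigma**2 is factored out, so the returned inverse should be the 1x1 matrix [[1 - phi**2]]:

import numpy as np

phi = 0.6
Vp = np.array([[1.0 / (1.0 - phi ** 2)]])   # presample variance, sigma2 factored out
Vpinv = np.linalg.inv(Vp)
assert np.allclose(Vpinv, [[1.0 - phi ** 2]])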
d7bc99ff64c7dfdcad221050f0af8e0d832eaa3e7641ae25c4ba020f24e82c5d
|
def _loglike_css(self, params):
'\n Loglikelihood of AR(p) process using conditional sum of squares\n '
nobs = self.nobs
Y = self.Y
X = self.X
ssr = sumofsq((Y.squeeze() - np.dot(X, params)))
sigma2 = (ssr / nobs)
return (((- nobs) / 2) * ((np.log((2 * np.pi)) + np.log(sigma2)) + 1))
|
Loglikelihood of AR(p) process using conditional sum of squares
|
statsmodels/tsa/ar_model.py
|
_loglike_css
|
raamana/statsmodels
| 6 |
python
|
def _loglike_css(self, params):
'\n \n '
nobs = self.nobs
Y = self.Y
X = self.X
ssr = sumofsq((Y.squeeze() - np.dot(X, params)))
sigma2 = (ssr / nobs)
return (((- nobs) / 2) * ((np.log((2 * np.pi)) + np.log(sigma2)) + 1))
|
def _loglike_css(self, params):
'\n \n '
nobs = self.nobs
Y = self.Y
X = self.X
ssr = sumofsq((Y.squeeze() - np.dot(X, params)))
sigma2 = (ssr / nobs)
return (((- nobs) / 2) * ((np.log((2 * np.pi)) + np.log(sigma2)) + 1))<|docstring|>Loglikelihood of AR(p) process using conditional sum of squares<|endoftext|>
|
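The closed form above follows from concentrating sigma2 = ssr/nobs out of the Gaussian log-likelihood; a standalone sketch with made-up residuals:

import numpy as np

def loglike_css_sketch(resid, nobs):
    sigma2 = np.sum(resid ** 2) / nobs    # concentrated variance estimate
    return -nobs / 2.0 * (np.log(2 * np.pi) + np.log(sigma2) + 1)

print(loglike_css_sketch(np.random.randn(100), 100))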
08216abc496d84c88dc42a71016358890e4703200d6e9c7ec37b1197e62dc165
|
def _loglike_mle(self, params):
'\n Loglikelihood of AR(p) process using exact maximum likelihood\n '
nobs = self.nobs
X = self.X
endog = self.endog
k_ar = self.k_ar
k_trend = self.k_trend
if self.transparams:
params = self._transparams(params)
yp = endog[:k_ar].copy()
if k_trend:
c = ([params[0]] * k_ar)
else:
c = [0]
mup = np.asarray((c / (1 - np.sum(params[k_trend:]))))
    diffp = (yp - mup[:, None])
Vpinv = self._presample_varcov(params)
diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
ssr = sumofsq((endog[k_ar:].squeeze() - np.dot(X, params)))
sigma2 = ((1.0 / nobs) * (diffpVpinv + ssr))
self.sigma2 = sigma2
logdet = slogdet(Vpinv)[1]
loglike = (((- 1) / 2.0) * ((((nobs * (np.log((2 * np.pi)) + np.log(sigma2))) - logdet) + (diffpVpinv / sigma2)) + (ssr / sigma2)))
return loglike
|
Loglikelihood of AR(p) process using exact maximum likelihood
|
statsmodels/tsa/ar_model.py
|
_loglike_mle
|
raamana/statsmodels
| 6 |
python
|
def _loglike_mle(self, params):
'\n \n '
nobs = self.nobs
X = self.X
endog = self.endog
k_ar = self.k_ar
k_trend = self.k_trend
if self.transparams:
params = self._transparams(params)
yp = endog[:k_ar].copy()
if k_trend:
c = ([params[0]] * k_ar)
else:
c = [0]
mup = np.asarray((c / (1 - np.sum(params[k_trend:]))))
    diffp = (yp - mup[:, None])
Vpinv = self._presample_varcov(params)
diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
ssr = sumofsq((endog[k_ar:].squeeze() - np.dot(X, params)))
sigma2 = ((1.0 / nobs) * (diffpVpinv + ssr))
self.sigma2 = sigma2
logdet = slogdet(Vpinv)[1]
loglike = (((- 1) / 2.0) * ((((nobs * (np.log((2 * np.pi)) + np.log(sigma2))) - logdet) + (diffpVpinv / sigma2)) + (ssr / sigma2)))
return loglike
|
def _loglike_mle(self, params):
'\n \n '
nobs = self.nobs
X = self.X
endog = self.endog
k_ar = self.k_ar
k_trend = self.k_trend
if self.transparams:
params = self._transparams(params)
yp = endog[:k_ar].copy()
if k_trend:
c = ([params[0]] * k_ar)
else:
c = [0]
mup = np.asarray((c / (1 - np.sum(params[k_trend:]))))
    diffp = (yp - mup[:, None])
Vpinv = self._presample_varcov(params)
diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
ssr = sumofsq((endog[k_ar:].squeeze() - np.dot(X, params)))
sigma2 = ((1.0 / nobs) * (diffpVpinv + ssr))
self.sigma2 = sigma2
logdet = slogdet(Vpinv)[1]
loglike = (((- 1) / 2.0) * ((((nobs * (np.log((2 * np.pi)) + np.log(sigma2))) - logdet) + (diffpVpinv / sigma2)) + (ssr / sigma2)))
return loglike<|docstring|>Loglikelihood of AR(p) process using exact maximum likelihood<|endoftext|>
|
e6d1283e24437fe1d7aab9947e953a51b2bc6f5aa11113524201ca47044d81ae
|
def loglike(self, params):
'\n The loglikelihood of an AR(p) process.\n\n Parameters\n ----------\n params : array\n The fitted parameters of the AR model.\n\n Returns\n -------\n float\n The loglikelihood evaluated at `params`.\n\n Notes\n -----\n Contains constant term. If the model is fit by OLS then this returns\n the conditional maximum likelihood.\n\n .. math::\n\n \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)\n +\\log\\left(\\sigma^{2}\\right)\\right)\n -\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}\n\n If it is fit by MLE then the (exact) unconditional maximum likelihood\n is returned.\n\n .. math::\n\n -\\frac{n}{2}log\\left(2\\pi\\right)\n -\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)\n +\\frac{1}{2}\\left|V_{p}^{-1}\\right|\n -\\frac{1}{2\\sigma^{2}}\\left(y_{p}\n -\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)\n -\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}\n\n where\n\n :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the\n mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)\n variance-covariance matrix of the first `p` observations.\n '
if (self.method == 'cmle'):
return self._loglike_css(params)
else:
return self._loglike_mle(params)
|
The loglikelihood of an AR(p) process.
Parameters
----------
params : array
The fitted parameters of the AR model.
Returns
-------
float
The loglikelihood evaluated at `params`.
Notes
-----
Contains constant term. If the model is fit by OLS then this returns
the conditional maximum likelihood.
.. math::
\frac{\left(n-p\right)}{2}\left(\log\left(2\pi\right)
+\log\left(\sigma^{2}\right)\right)
-\frac{1}{\sigma^{2}}\sum_{i}\epsilon_{i}^{2}
If it is fit by MLE then the (exact) unconditional maximum likelihood
is returned.
.. math::
-\frac{n}{2}\log\left(2\pi\right)
-\frac{n}{2}\log\left(\sigma^{2}\right)
+\frac{1}{2}\left|V_{p}^{-1}\right|
-\frac{1}{2\sigma^{2}}\left(y_{p}
-\mu_{p}\right)^{\prime}V_{p}^{-1}\left(y_{p}-\mu_{p}\right)
-\frac{1}{2\sigma^{2}}\sum_{t=p+1}^{n}\epsilon_{i}^{2}
where
:math:`\mu_{p}` is a (`p` x 1) vector with each element equal to the
mean of the AR process and :math:`\sigma^{2}V_{p}` is the (`p` x `p`)
variance-covariance matrix of the first `p` observations.
|
statsmodels/tsa/ar_model.py
|
loglike
|
raamana/statsmodels
| 6 |
python
|
def loglike(self, params):
'\n The loglikelihood of an AR(p) process.\n\n Parameters\n ----------\n params : array\n The fitted parameters of the AR model.\n\n Returns\n -------\n float\n The loglikelihood evaluated at `params`.\n\n Notes\n -----\n Contains constant term. If the model is fit by OLS then this returns\n the conditional maximum likelihood.\n\n .. math::\n\n \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)\n +\\log\\left(\\sigma^{2}\\right)\\right)\n -\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}\n\n If it is fit by MLE then the (exact) unconditional maximum likelihood\n is returned.\n\n .. math::\n\n -\\frac{n}{2}log\\left(2\\pi\\right)\n -\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)\n +\\frac{1}{2}\\left|V_{p}^{-1}\\right|\n -\\frac{1}{2\\sigma^{2}}\\left(y_{p}\n -\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)\n -\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}\n\n where\n\n :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the\n mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)\n variance-covariance matrix of the first `p` observations.\n '
if (self.method == 'cmle'):
return self._loglike_css(params)
else:
return self._loglike_mle(params)
|
def loglike(self, params):
'\n The loglikelihood of an AR(p) process.\n\n Parameters\n ----------\n params : array\n The fitted parameters of the AR model.\n\n Returns\n -------\n float\n The loglikelihood evaluated at `params`.\n\n Notes\n -----\n Contains constant term. If the model is fit by OLS then this returns\n the conditional maximum likelihood.\n\n .. math::\n\n \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)\n +\\log\\left(\\sigma^{2}\\right)\\right)\n -\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}\n\n If it is fit by MLE then the (exact) unconditional maximum likelihood\n is returned.\n\n .. math::\n\n -\\frac{n}{2}log\\left(2\\pi\\right)\n -\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)\n +\\frac{1}{2}\\left|V_{p}^{-1}\\right|\n -\\frac{1}{2\\sigma^{2}}\\left(y_{p}\n -\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)\n -\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}\n\n where\n\n :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the\n mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)\n variance-covariance matrix of the first `p` observations.\n '
if (self.method == 'cmle'):
return self._loglike_css(params)
else:
return self._loglike_mle(params)<|docstring|>The loglikelihood of an AR(p) process.
Parameters
----------
params : array
The fitted parameters of the AR model.
Returns
-------
float
The loglikelihood evaluated at `params`.
Notes
-----
Contains constant term. If the model is fit by OLS then this returns
the conditional maximum likelihood.
.. math::
\frac{\left(n-p\right)}{2}\left(\log\left(2\pi\right)
+\log\left(\sigma^{2}\right)\right)
-\frac{1}{\sigma^{2}}\sum_{i}\epsilon_{i}^{2}
If it is fit by MLE then the (exact) unconditional maximum likelihood
is returned.
.. math::
-\frac{n}{2}\log\left(2\pi\right)
-\frac{n}{2}\log\left(\sigma^{2}\right)
+\frac{1}{2}\left|V_{p}^{-1}\right|
-\frac{1}{2\sigma^{2}}\left(y_{p}
-\mu_{p}\right)^{\prime}V_{p}^{-1}\left(y_{p}-\mu_{p}\right)
-\frac{1}{2\sigma^{2}}\sum_{t=p+1}^{n}\epsilon_{i}^{2}
where
:math:`\mu_{p}` is a (`p` x 1) vector with each element equal to the
mean of the AR process and :math:`\sigma^{2}V_{p}` is the (`p` x `p`)
variance-covariance matrix of the first `p` observations.<|endoftext|>
|
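Continuing the earlier fit sketch (assuming a fitted `res`), the log-likelihood can be re-evaluated at the fitted parameters:

llf = res.model.loglike(res.params)   # exact MLE value when method='mle'
# should agree with the stored res.llf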
97eca8a6f126ef17d9da39133cab8675616cabb2d9cb4060e1b6ffc450db0633
|
def score(self, params):
'\n Compute the gradient of the log-likelihood at params.\n\n Parameters\n ----------\n params : array_like\n The parameter values at which to evaluate the score function.\n\n Returns\n -------\n ndarray\n The gradient computed using numerical methods.\n '
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-08)
|
Compute the gradient of the log-likelihood at params.
Parameters
----------
params : array_like
The parameter values at which to evaluate the score function.
Returns
-------
ndarray
The gradient computed using numerical methods.
|
statsmodels/tsa/ar_model.py
|
score
|
raamana/statsmodels
| 6 |
python
|
def score(self, params):
'\n Compute the gradient of the log-likelihood at params.\n\n Parameters\n ----------\n params : array_like\n The parameter values at which to evaluate the score function.\n\n Returns\n -------\n ndarray\n The gradient computed using numerical methods.\n '
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-08)
|
def score(self, params):
'\n Compute the gradient of the log-likelihood at params.\n\n Parameters\n ----------\n params : array_like\n The parameter values at which to evaluate the score function.\n\n Returns\n -------\n ndarray\n The gradient computed using numerical methods.\n '
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-08)<|docstring|>Compute the gradient of the log-likelihood at params.
Parameters
----------
params : array_like
The parameter values at which to evaluate the score function.
Returns
-------
ndarray
The gradient computed using numerical methods.<|endoftext|>
|
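A standalone sketch of the forward-difference gradient that the numerical score relies on (not the statsmodels implementation itself):

import numpy as np

def fprime_sketch(params, f, epsilon=1e-8):
    grad = np.zeros(len(params))
    f0 = f(params)
    for i in range(len(params)):
        step = np.zeros(len(params))
        step[i] = epsilon
        grad[i] = (f(params + step) - f0) / epsilon   # forward difference
    return grad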
3870ef08385b1f921337fd23fdf902af40d4b8a705358f53da831c1f51fe474a
|
def information(self, params):
'\n Not implemented.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n '
return
|
Not implemented.
Parameters
----------
params : ndarray
The model parameters.
|
statsmodels/tsa/ar_model.py
|
information
|
raamana/statsmodels
| 6 |
python
|
def information(self, params):
'\n Not implemented.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n '
return
|
def information(self, params):
'\n Not implemented.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n '
return<|docstring|>Not implemented.
Parameters
----------
params : ndarray
The model parameters.<|endoftext|>
|
a0b05073764e948c71fc21cddb269b479bac49587178a1a6e766a0473981bd2b
|
def hessian(self, params):
'\n Compute the hessian using a numerical approximation.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n\n Returns\n -------\n ndarray\n The hessian evaluated at params.\n '
loglike = self.loglike
return approx_hess(params, loglike)
|
Compute the hessian using a numerical approximation.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
ndarray
The hessian evaluated at params.
|
statsmodels/tsa/ar_model.py
|
hessian
|
raamana/statsmodels
| 6 |
python
|
def hessian(self, params):
'\n Compute the hessian using a numerical approximation.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n\n Returns\n -------\n ndarray\n The hessian evaluated at params.\n '
loglike = self.loglike
return approx_hess(params, loglike)
|
def hessian(self, params):
'\n Compute the hessian using a numerical approximation.\n\n Parameters\n ----------\n params : ndarray\n The model parameters.\n\n Returns\n -------\n ndarray\n The hessian evaluated at params.\n '
loglike = self.loglike
return approx_hess(params, loglike)<|docstring|>Compute the hessian using a numerical approximation.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
ndarray
The hessian evaluated at params.<|endoftext|>
|
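Typical downstream use of the numerical Hessian, mirroring what `ARResults.bse` does for MLE fits (assumes a fitted `res` as in the earlier sketch):

import numpy as np

hess = res.model.hessian(res.params)
bse = np.sqrt(np.diag(-np.linalg.inv(hess)))   # standard errors from -H^{-1}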
84af7bfe0bb1e2305842bd47d04f464f0d90063911a0608de9221dba3e51a641
|
def _stackX(self, k_ar, trend):
'\n Private method to build the RHS matrix for estimation.\n\n Columns are trend terms then lags.\n '
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend, has_constant='raise')
self.k_trend = k_trend
return X
|
Private method to build the RHS matrix for estimation.
Columns are trend terms then lags.
|
statsmodels/tsa/ar_model.py
|
_stackX
|
raamana/statsmodels
| 6 |
python
|
def _stackX(self, k_ar, trend):
'\n Private method to build the RHS matrix for estimation.\n\n Columns are trend terms then lags.\n '
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend, has_constant='raise')
self.k_trend = k_trend
return X
|
def _stackX(self, k_ar, trend):
'\n Private method to build the RHS matrix for estimation.\n\n Columns are trend terms then lags.\n '
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend, has_constant='raise')
self.k_trend = k_trend
return X<|docstring|>Private method to build the RHS matrix for estimation.
Columns are trend terms then lags.<|endoftext|>
|
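The same RHS matrix can be assembled directly from the public helpers the method uses; a small demo on toy data:

import numpy as np
from statsmodels.tsa.tsatools import lagmat, add_trend

y = np.arange(1.0, 9.0)
X = lagmat(y, maxlag=2, trim='both')        # columns: y lagged by 1 and 2
X = add_trend(X, prepend=True, trend='c')   # constant column prepended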
9dc76f658040eaecc55ec781aecf81816aec1f986311747f1a608088480fb653
|
def select_order(self, maxlag, ic, trend='c', method='mle'):
"\n Select the lag order according to the information criterion.\n\n Parameters\n ----------\n maxlag : int\n The highest lag length tried. See `AR.fit`.\n ic : {'aic','bic','hqic','t-stat'}\n Criterion used for selecting the optimal lag length.\n See `AR.fit`.\n trend : {'c','nc'}\n Whether to include a constant or not. 'c' - include constant.\n 'nc' - no constant.\n method : {'cmle', 'mle'}, optional\n The method to use in estimation.\n\n * 'cmle' - Conditional maximum likelihood using OLS\n * 'mle' - Unconditional (exact) maximum likelihood. See `solver`\n and the Notes.\n\n Returns\n -------\n int\n Best lag according to the information criteria.\n "
endog = self.endog
Y = endog[maxlag:]
self.Y = Y
X = self._stackX(maxlag, trend)
self.X = X
k = self.k_trend
k = max(1, k)
results = {}
if (ic != 't-stat'):
for lag in range(k, (maxlag + 1)):
endog_tmp = endog[(maxlag - lag):]
fit = AR(endog_tmp).fit(maxlag=lag, method=method, full_output=0, trend=trend, maxiter=100, disp=0)
results[lag] = getattr(fit, ic)
(bestic, bestlag) = min(((res, k) for (k, res) in results.items()))
else:
stop = 1.6448536269514722
for lag in range(maxlag, (k - 1), (- 1)):
endog_tmp = endog[(maxlag - lag):]
fit = AR(endog_tmp).fit(maxlag=lag, method=method, full_output=0, trend=trend, maxiter=35, disp=(- 1))
bestlag = 0
if (np.abs(fit.tvalues[(- 1)]) >= stop):
bestlag = lag
break
return bestlag
|
Select the lag order according to the information criterion.
Parameters
----------
maxlag : int
The highest lag length tried. See `AR.fit`.
ic : {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
See `AR.fit`.
trend : {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
method : {'cmle', 'mle'}, optional
The method to use in estimation.
* 'cmle' - Conditional maximum likelihood using OLS
* 'mle' - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
Returns
-------
int
Best lag according to the information criteria.
|
statsmodels/tsa/ar_model.py
|
select_order
|
raamana/statsmodels
| 6 |
python
|
def select_order(self, maxlag, ic, trend='c', method='mle'):
"\n Select the lag order according to the information criterion.\n\n Parameters\n ----------\n maxlag : int\n The highest lag length tried. See `AR.fit`.\n ic : {'aic','bic','hqic','t-stat'}\n Criterion used for selecting the optimal lag length.\n See `AR.fit`.\n trend : {'c','nc'}\n Whether to include a constant or not. 'c' - include constant.\n 'nc' - no constant.\n method : {'cmle', 'mle'}, optional\n The method to use in estimation.\n\n * 'cmle' - Conditional maximum likelihood using OLS\n * 'mle' - Unconditional (exact) maximum likelihood. See `solver`\n and the Notes.\n\n Returns\n -------\n int\n Best lag according to the information criteria.\n "
endog = self.endog
Y = endog[maxlag:]
self.Y = Y
X = self._stackX(maxlag, trend)
self.X = X
k = self.k_trend
k = max(1, k)
results = {}
if (ic != 't-stat'):
for lag in range(k, (maxlag + 1)):
endog_tmp = endog[(maxlag - lag):]
fit = AR(endog_tmp).fit(maxlag=lag, method=method, full_output=0, trend=trend, maxiter=100, disp=0)
results[lag] = getattr(fit, ic)
(bestic, bestlag) = min(((res, k) for (k, res) in results.items()))
else:
stop = 1.6448536269514722
for lag in range(maxlag, (k - 1), (- 1)):
endog_tmp = endog[(maxlag - lag):]
fit = AR(endog_tmp).fit(maxlag=lag, method=method, full_output=0, trend=trend, maxiter=35, disp=(- 1))
bestlag = 0
if (np.abs(fit.tvalues[(- 1)]) >= stop):
bestlag = lag
break
return bestlag
|
def select_order(self, maxlag, ic, trend='c', method='mle'):
"\n Select the lag order according to the information criterion.\n\n Parameters\n ----------\n maxlag : int\n The highest lag length tried. See `AR.fit`.\n ic : {'aic','bic','hqic','t-stat'}\n Criterion used for selecting the optimal lag length.\n See `AR.fit`.\n trend : {'c','nc'}\n Whether to include a constant or not. 'c' - include constant.\n 'nc' - no constant.\n method : {'cmle', 'mle'}, optional\n The method to use in estimation.\n\n * 'cmle' - Conditional maximum likelihood using OLS\n * 'mle' - Unconditional (exact) maximum likelihood. See `solver`\n and the Notes.\n\n Returns\n -------\n int\n Best lag according to the information criteria.\n "
endog = self.endog
Y = endog[maxlag:]
self.Y = Y
X = self._stackX(maxlag, trend)
self.X = X
k = self.k_trend
k = max(1, k)
results = {}
if (ic != 't-stat'):
for lag in range(k, (maxlag + 1)):
endog_tmp = endog[(maxlag - lag):]
fit = AR(endog_tmp).fit(maxlag=lag, method=method, full_output=0, trend=trend, maxiter=100, disp=0)
results[lag] = getattr(fit, ic)
(bestic, bestlag) = min(((res, k) for (k, res) in results.items()))
else:
stop = 1.6448536269514722
for lag in range(maxlag, (k - 1), (- 1)):
endog_tmp = endog[(maxlag - lag):]
fit = AR(endog_tmp).fit(maxlag=lag, method=method, full_output=0, trend=trend, maxiter=35, disp=(- 1))
bestlag = 0
if (np.abs(fit.tvalues[(- 1)]) >= stop):
bestlag = lag
break
return bestlag<|docstring|>Select the lag order according to the information criterion.
Parameters
----------
maxlag : int
The highest lag length tried. See `AR.fit`.
ic : {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
See `AR.fit`.
trend : {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
method : {'cmle', 'mle'}, optional
The method to use in estimation.
* 'cmle' - Conditional maximum likelihood using OLS
* 'mle' - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
Returns
-------
int
Best lag according to the information criteria.<|endoftext|>
|
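A typical call, reusing a series `y` like the one simulated in the earlier sketch:

from statsmodels.tsa.ar_model import AR

best_lag = AR(y).select_order(maxlag=12, ic='bic', trend='c', method='cmle')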
b22f6165c648f72332368aba778fda56662acf9e6b7624671f0105df581f0b19
|
def fit(self, maxlag=None, method='cmle', ic=None, trend='c', transparams=True, start_params=None, solver='lbfgs', maxiter=35, full_output=1, disp=1, callback=None, **kwargs):
'\n Fit the unconditional maximum likelihood of an AR(p) process.\n\n Parameters\n ----------\n maxlag : int\n If `ic` is None, then maxlag is the lag length used in fit. If\n `ic` is specified then maxlag is the highest lag order used to\n select the correct lag order. If maxlag is None, the default is\n round(12*(nobs/100.)**(1/4.)).\n method : {\'cmle\', \'mle\'}, optional\n The method to use in estimation.\n\n * \'cmle\' - Conditional maximum likelihood using OLS\n * \'mle\' - Unconditional (exact) maximum likelihood. See `solver`\n and the Notes.\n ic : {\'aic\',\'bic\',\'hic\',\'t-stat\'}\n Criterion used for selecting the optimal lag length.\n\n * \'aic\' - Akaike Information Criterion\n * \'bic\' - Bayes Information Criterion\n * \'t-stat\' - Based on last lag\n * \'hqic\' - Hannan-Quinn Information Criterion\n\n If any of the information criteria are selected, the lag length\n which results in the lowest value is selected. If t-stat, the\n model starts with maxlag and drops a lag until the highest lag\n has a t-stat that is significant at the 95 % level.\n trend : {\'c\',\'nc\'}\n Whether to include a constant or not.\n\n * \'c\' - include constant.\n * \'nc\' - no constant.\n transparams : bool, optional\n Whether or not to transform the parameters to ensure stationarity.\n Uses the transformation suggested in Jones (1980).\n start_params : array_like, optional\n A first guess on the parameters. Default is cmle estimates.\n solver : str or None, optional\n Solver to be used if method is \'mle\'. The default is \'lbfgs\'\n (limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices\n are \'bfgs\', \'newton\' (Newton-Raphson), \'nm\' (Nelder-Mead),\n \'cg\' - (conjugate gradient), \'ncg\' (non-conjugate gradient),\n and \'powell\'.\n maxiter : int, optional\n The maximum number of function evaluations. Default is 35.\n full_output : bool, optional\n If True, all output from solver will be available in\n the Results object\'s mle_retvals attribute. Output is dependent\n on the solver. See Notes for more information.\n disp : bool, optional\n If True, convergence information is output.\n callback : function, optional\n Called after each iteration as callback(xk) where xk is the current\n parameter vector.\n **kwargs\n See LikelihoodModel.fit for keyword arguments that can be passed\n to fit.\n\n Returns\n -------\n ARResults\n Results instance.\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit\n Base fit class with further details about options.\n\n Notes\n -----\n The parameters after `trend` are only used when method is \'mle\'.\n\n References\n ----------\n .. [*] Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to\n time series with missing observations." `Technometrics`. 22.3.\n 389-95.\n '
start_params = array_like(start_params, 'start_params', ndim=1, optional=True)
method = method.lower()
if (method not in ['cmle', 'mle']):
raise ValueError(('Method %s not recognized' % method))
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog)
endog = self.endog
fit_params = (maxlag, method, ic, trend)
if ((self._fit_params is not None) and (self._fit_params != fit_params)):
raise RuntimeError(REPEATED_FIT_ERROR.format(*self._fit_params))
if (maxlag is None):
maxlag = int(round((12 * ((nobs / 100.0) ** (1 / 4.0)))))
k_ar = maxlag
if (ic is not None):
ic = ic.lower()
if (ic not in ['aic', 'bic', 'hqic', 't-stat']):
raise ValueError(('ic option %s not understood' % ic))
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar
    Y = endog[k_ar:, :]
X = self._stackX(k_ar, trend)
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if (method == 'cmle'):
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = (nobs - k_ar)
self.sigma2 = (arfit.ssr / arfit.nobs)
else:
solver = solver.lower()
self.nobs = nobs
if (start_params is None):
start_params = OLS(Y, X).fit().params
elif (len(start_params) != (k_trend + k_ar)):
raise ValueError(('Length of start params is %d. There are %d parameters.' % (len(start_params), (k_trend + k_ar))))
start_params = self._invtransparams(start_params)
if (solver == 'lbfgs'):
kwargs.setdefault('pgtol', 1e-08)
kwargs.setdefault('factr', 100.0)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(AR, self).fit(start_params=start_params, method=solver, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(copy.copy(self), params, normalized_cov_params)
if ((method == 'mle') and full_output):
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
if (self._fit_params is None):
self._fit_params = fit_params
return ARResultsWrapper(arfit)
|
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.)).
method : {'cmle', 'mle'}, optional
The method to use in estimation.
* 'cmle' - Conditional maximum likelihood using OLS
* 'mle' - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
* 'aic' - Akaike Information Criterion
* 'bic' - Bayes Information Criterion
* 't-stat' - Based on last lag
* 'hqic' - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : {'c','nc'}
Whether to include a constant or not.
* 'c' - include constant.
* 'nc' - no constant.
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array_like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used if method is 'mle'. The default is 'lbfgs'
(limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
and 'powell'.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
**kwargs
See LikelihoodModel.fit for keyword arguments that can be passed
to fit.
Returns
-------
ARResults
Results instance.
See Also
--------
statsmodels.base.model.LikelihoodModel.fit
Base fit class with further details about options.
Notes
-----
The parameters after `trend` are only used when method is 'mle'.
References
----------
.. [*] Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to
time series with missing observations." `Technometrics`. 22.3.
389-95.
|
statsmodels/tsa/ar_model.py
|
fit
|
raamana/statsmodels
| 6 |
python
|
def fit(self, maxlag=None, method='cmle', ic=None, trend='c', transparams=True, start_params=None, solver='lbfgs', maxiter=35, full_output=1, disp=1, callback=None, **kwargs):
'\n Fit the unconditional maximum likelihood of an AR(p) process.\n\n Parameters\n ----------\n maxlag : int\n If `ic` is None, then maxlag is the lag length used in fit. If\n `ic` is specified then maxlag is the highest lag order used to\n select the correct lag order. If maxlag is None, the default is\n round(12*(nobs/100.)**(1/4.)).\n method : {\'cmle\', \'mle\'}, optional\n The method to use in estimation.\n\n * \'cmle\' - Conditional maximum likelihood using OLS\n * \'mle\' - Unconditional (exact) maximum likelihood. See `solver`\n and the Notes.\n ic : {\'aic\',\'bic\',\'hic\',\'t-stat\'}\n Criterion used for selecting the optimal lag length.\n\n * \'aic\' - Akaike Information Criterion\n * \'bic\' - Bayes Information Criterion\n * \'t-stat\' - Based on last lag\n * \'hqic\' - Hannan-Quinn Information Criterion\n\n If any of the information criteria are selected, the lag length\n which results in the lowest value is selected. If t-stat, the\n model starts with maxlag and drops a lag until the highest lag\n has a t-stat that is significant at the 95 % level.\n trend : {\'c\',\'nc\'}\n Whether to include a constant or not.\n\n * \'c\' - include constant.\n * \'nc\' - no constant.\n transparams : bool, optional\n Whether or not to transform the parameters to ensure stationarity.\n Uses the transformation suggested in Jones (1980).\n start_params : array_like, optional\n A first guess on the parameters. Default is cmle estimates.\n solver : str or None, optional\n Solver to be used if method is \'mle\'. The default is \'lbfgs\'\n (limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices\n are \'bfgs\', \'newton\' (Newton-Raphson), \'nm\' (Nelder-Mead),\n \'cg\' - (conjugate gradient), \'ncg\' (non-conjugate gradient),\n and \'powell\'.\n maxiter : int, optional\n The maximum number of function evaluations. Default is 35.\n full_output : bool, optional\n If True, all output from solver will be available in\n the Results object\'s mle_retvals attribute. Output is dependent\n on the solver. See Notes for more information.\n disp : bool, optional\n If True, convergence information is output.\n callback : function, optional\n Called after each iteration as callback(xk) where xk is the current\n parameter vector.\n **kwargs\n See LikelihoodModel.fit for keyword arguments that can be passed\n to fit.\n\n Returns\n -------\n ARResults\n Results instance.\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit\n Base fit class with further details about options.\n\n Notes\n -----\n The parameters after `trend` are only used when method is \'mle\'.\n\n References\n ----------\n .. [*] Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to\n time series with missing observations." `Technometrics`. 22.3.\n 389-95.\n '
start_params = array_like(start_params, 'start_params', ndim=1, optional=True)
method = method.lower()
if (method not in ['cmle', 'mle']):
raise ValueError(('Method %s not recognized' % method))
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog)
endog = self.endog
fit_params = (maxlag, method, ic, trend)
if ((self._fit_params is not None) and (self._fit_params != fit_params)):
raise RuntimeError(REPEATED_FIT_ERROR.format(*self._fit_params))
if (maxlag is None):
maxlag = int(round((12 * ((nobs / 100.0) ** (1 / 4.0)))))
k_ar = maxlag
if (ic is not None):
ic = ic.lower()
if (ic not in ['aic', 'bic', 'hqic', 't-stat']):
raise ValueError(('ic option %s not understood' % ic))
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar
    Y = endog[k_ar:, :]
X = self._stackX(k_ar, trend)
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if (method == 'cmle'):
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = (nobs - k_ar)
self.sigma2 = (arfit.ssr / arfit.nobs)
else:
solver = solver.lower()
self.nobs = nobs
if (start_params is None):
start_params = OLS(Y, X).fit().params
elif (len(start_params) != (k_trend + k_ar)):
raise ValueError(('Length of start params is %d. There are %d parameters.' % (len(start_params), (k_trend + k_ar))))
start_params = self._invtransparams(start_params)
if (solver == 'lbfgs'):
kwargs.setdefault('pgtol', 1e-08)
kwargs.setdefault('factr', 100.0)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(AR, self).fit(start_params=start_params, method=solver, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(copy.copy(self), params, normalized_cov_params)
if ((method == 'mle') and full_output):
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
if (self._fit_params is None):
self._fit_params = fit_params
return ARResultsWrapper(arfit)
|
def fit(self, maxlag=None, method='cmle', ic=None, trend='c', transparams=True, start_params=None, solver='lbfgs', maxiter=35, full_output=1, disp=1, callback=None, **kwargs):
'\n Fit the unconditional maximum likelihood of an AR(p) process.\n\n Parameters\n ----------\n maxlag : int\n If `ic` is None, then maxlag is the lag length used in fit. If\n `ic` is specified then maxlag is the highest lag order used to\n select the correct lag order. If maxlag is None, the default is\n round(12*(nobs/100.)**(1/4.)).\n method : {\'cmle\', \'mle\'}, optional\n The method to use in estimation.\n\n * \'cmle\' - Conditional maximum likelihood using OLS\n * \'mle\' - Unconditional (exact) maximum likelihood. See `solver`\n and the Notes.\n ic : {\'aic\',\'bic\',\'hic\',\'t-stat\'}\n Criterion used for selecting the optimal lag length.\n\n * \'aic\' - Akaike Information Criterion\n * \'bic\' - Bayes Information Criterion\n * \'t-stat\' - Based on last lag\n * \'hqic\' - Hannan-Quinn Information Criterion\n\n If any of the information criteria are selected, the lag length\n which results in the lowest value is selected. If t-stat, the\n model starts with maxlag and drops a lag until the highest lag\n has a t-stat that is significant at the 95 % level.\n trend : {\'c\',\'nc\'}\n Whether to include a constant or not.\n\n * \'c\' - include constant.\n * \'nc\' - no constant.\n transparams : bool, optional\n Whether or not to transform the parameters to ensure stationarity.\n Uses the transformation suggested in Jones (1980).\n start_params : array_like, optional\n A first guess on the parameters. Default is cmle estimates.\n solver : str or None, optional\n Solver to be used if method is \'mle\'. The default is \'lbfgs\'\n (limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices\n are \'bfgs\', \'newton\' (Newton-Raphson), \'nm\' (Nelder-Mead),\n \'cg\' - (conjugate gradient), \'ncg\' (non-conjugate gradient),\n and \'powell\'.\n maxiter : int, optional\n The maximum number of function evaluations. Default is 35.\n full_output : bool, optional\n If True, all output from solver will be available in\n the Results object\'s mle_retvals attribute. Output is dependent\n on the solver. See Notes for more information.\n disp : bool, optional\n If True, convergence information is output.\n callback : function, optional\n Called after each iteration as callback(xk) where xk is the current\n parameter vector.\n **kwargs\n See LikelihoodModel.fit for keyword arguments that can be passed\n to fit.\n\n Returns\n -------\n ARResults\n Results instance.\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit\n Base fit class with further details about options.\n\n Notes\n -----\n The parameters after `trend` are only used when method is \'mle\'.\n\n References\n ----------\n .. [*] Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to\n time series with missing observations." `Technometrics`. 22.3.\n 389-95.\n '
start_params = array_like(start_params, 'start_params', ndim=1, optional=True)
method = method.lower()
if (method not in ['cmle', 'mle']):
raise ValueError(('Method %s not recognized' % method))
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog)
endog = self.endog
fit_params = (maxlag, method, ic, trend)
if ((self._fit_params is not None) and (self._fit_params != fit_params)):
raise RuntimeError(REPEATED_FIT_ERROR.format(*self._fit_params))
if (maxlag is None):
maxlag = int(round((12 * ((nobs / 100.0) ** (1 / 4.0)))))
k_ar = maxlag
if (ic is not None):
ic = ic.lower()
if (ic not in ['aic', 'bic', 'hqic', 't-stat']):
raise ValueError(('ic option %s not understood' % ic))
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar
    Y = endog[k_ar:, :]
X = self._stackX(k_ar, trend)
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if (method == 'cmle'):
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = (nobs - k_ar)
self.sigma2 = (arfit.ssr / arfit.nobs)
else:
solver = solver.lower()
self.nobs = nobs
if (start_params is None):
start_params = OLS(Y, X).fit().params
elif (len(start_params) != (k_trend + k_ar)):
raise ValueError(('Length of start params is %d. There are %d parameters.' % (len(start_params), (k_trend + k_ar))))
start_params = self._invtransparams(start_params)
if (solver == 'lbfgs'):
kwargs.setdefault('pgtol', 1e-08)
kwargs.setdefault('factr', 100.0)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(AR, self).fit(start_params=start_params, method=solver, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, **kwargs)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(copy.copy(self), params, normalized_cov_params)
if ((method == 'mle') and full_output):
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
if (self._fit_params is None):
self._fit_params = fit_params
return ARResultsWrapper(arfit)<|docstring|>Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.)).
method : {'cmle', 'mle'}, optional
The method to use in estimation.
* 'cmle' - Conditional maximum likelihood using OLS
* 'mle' - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
* 'aic' - Akaike Information Criterion
* 'bic' - Bayes Information Criterion
* 't-stat' - Based on last lag
* 'hqic' - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : {'c','nc'}
Whether to include a constant or not.
* 'c' - include constant.
* 'nc' - no constant.
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array_like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used if method is 'mle'. The default is 'lbfgs'
(limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
and 'powell'.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
**kwargs
See LikelihoodModel.fit for keyword arguments that can be passed
to fit.
Returns
-------
ARResults
Results instance.
See Also
--------
statsmodels.base.model.LikelihoodModel.fit
Base fit class with further details about options.
Notes
-----
The parameters after `trend` are only used when method is 'mle'.
References
----------
.. [*] Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to
time series with missing observations." `Technometrics`. 22.3.
389-95.<|endoftext|>
|
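Two illustrative fits; note that refitting the same `AR` instance with different options trips the REPEATED_FIT_ERROR guard seen above, so a fresh model is built each time:

from statsmodels.tsa.ar_model import AR

res_cmle = AR(y).fit(maxlag=4, method='cmle', trend='c')
res_mle = AR(y).fit(maxlag=4, method='mle', solver='lbfgs', maxiter=200, disp=0)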
a9c64afd5251e32844d5cf5dcf513ddbf016d448e396de140a598d5eedc8b4e9
|
@cache_readonly
def bse(self):
"\n The standard errors of the estimated parameters.\n\n If `method` is 'cmle', then the standard errors that are returned are\n the OLS standard errors of the coefficients. If the `method` is 'mle'\n then they are computed using the numerical Hessian.\n "
if (self.model.method == 'cmle'):
resid = self.resid
ssr = np.dot(resid, resid)
ols_scale = (ssr / ((self.nobs - self.k_ar) - self.k_trend))
return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
else:
hess = approx_hess(self.params, self.model.loglike)
return np.sqrt(np.diag((- np.linalg.inv(hess))))
|
The standard errors of the estimated parameters.
If `method` is 'cmle', then the standard errors that are returned are
the OLS standard errors of the coefficients. If the `method` is 'mle'
then they are computed using the numerical Hessian.
|
statsmodels/tsa/ar_model.py
|
bse
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def bse(self):
"\n The standard errors of the estimated parameters.\n\n If `method` is 'cmle', then the standard errors that are returned are\n the OLS standard errors of the coefficients. If the `method` is 'mle'\n then they are computed using the numerical Hessian.\n "
if (self.model.method == 'cmle'):
resid = self.resid
ssr = np.dot(resid, resid)
ols_scale = (ssr / ((self.nobs - self.k_ar) - self.k_trend))
return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
else:
hess = approx_hess(self.params, self.model.loglike)
return np.sqrt(np.diag((- np.linalg.inv(hess))))
|
@cache_readonly
def bse(self):
"\n The standard errors of the estimated parameters.\n\n If `method` is 'cmle', then the standard errors that are returned are\n the OLS standard errors of the coefficients. If the `method` is 'mle'\n then they are computed using the numerical Hessian.\n "
if (self.model.method == 'cmle'):
resid = self.resid
ssr = np.dot(resid, resid)
ols_scale = (ssr / ((self.nobs - self.k_ar) - self.k_trend))
return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
else:
hess = approx_hess(self.params, self.model.loglike)
return np.sqrt(np.diag((- np.linalg.inv(hess))))<|docstring|>The standard errors of the estimated parameters.
If `method` is 'cmle', then the standard errors that are returned are
the OLS standard errors of the coefficients. If the `method` is 'mle'
then they are computed using the numerical Hessian.<|endoftext|>
|
3f8e4a3fcc18bf54bd04739ba1de530cadee16897fba519969245da79b32aabd
|
@cache_readonly
def pvalues(self):
'The p values associated with the standard errors.'
return (norm.sf(np.abs(self.tvalues)) * 2)
|
The p values associated with the standard errors.
|
statsmodels/tsa/ar_model.py
|
pvalues
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def pvalues(self):
return (norm.sf(np.abs(self.tvalues)) * 2)
|
@cache_readonly
def pvalues(self):
return (norm.sf(np.abs(self.tvalues)) * 2)<|docstring|>The p values associated with the standard errors.<|endoftext|>
|
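The computation is the standard-normal survival function applied to |t|, doubled for a two-sided test; a standalone check with made-up t-statistics:

import numpy as np
from scipy.stats import norm

tvalues = np.array([2.0, -1.5, 0.3])    # hypothetical t-statistics
pvalues = norm.sf(np.abs(tvalues)) * 2  # two-sided, normal reference
print(pvalues)                          # ~[0.0455, 0.1336, 0.7642]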
ef2832c7f92f929135e873c976deba7ff6b0f24f53d6dfe488065c5a05e84f42
|
@cache_readonly
def aic(self):
    "\n Akaike Information Criterion using Lütkepohl's definition.\n\n :math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`\n "
return (np.log(self.sigma2) + ((2 * (1 + self.df_model)) / self.nobs))
|
Akaike Information Criterion using Lütkepohl's definition.
:math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`
|
statsmodels/tsa/ar_model.py
|
aic
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def aic(self):
    "\n Akaike Information Criterion using Lütkepohl's definition.\n\n :math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`\n "
return (np.log(self.sigma2) + ((2 * (1 + self.df_model)) / self.nobs))
|
@cache_readonly
def aic(self):
    "\n Akaike Information Criterion using Lütkepohl's definition.\n\n :math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`\n "
    return (np.log(self.sigma2) + ((2 * (1 + self.df_model)) / self.nobs))<|docstring|>Akaike Information Criterion using Lütkepohl's definition.
:math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`<|endoftext|>
|
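Written out, the criterion is log(sigma2) plus a penalty of 2*(1 + df_model)/nobs; a quick numeric check with hypothetical fit quantities:

import numpy as np

sigma2, df_model, nobs = 1.3, 2, 200  # hypothetical fit quantities
aic = np.log(sigma2) + (2 * (1 + df_model)) / nobs
print(aic)                            # log(1.3) + 6/200 ~= 0.2924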
48ab5e4d10dfec3d6264f8c9ee3d319c676d20a23f94037276218bf4f0676545
|
@cache_readonly
def hqic(self):
'Hannan-Quinn Information Criterion.'
nobs = self.nobs
return (np.log(self.sigma2) + (((2 * np.log(np.log(nobs))) / nobs) * (1 + self.df_model)))
|
Hannan-Quinn Information Criterion.
|
statsmodels/tsa/ar_model.py
|
hqic
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def hqic(self):
nobs = self.nobs
return (np.log(self.sigma2) + (((2 * np.log(np.log(nobs))) / nobs) * (1 + self.df_model)))
|
@cache_readonly
def hqic(self):
nobs = self.nobs
return (np.log(self.sigma2) + (((2 * np.log(np.log(nobs))) / nobs) * (1 + self.df_model)))<|docstring|>Hannan-Quinn Information Criterion.<|endoftext|>
|
d60773506b1548c668c9b51b0d5f70bc54f26763e81b696798d58da8166fe410
|
@cache_readonly
def fpe(self):
"\n Final prediction error using Lütkepohl's definition.\n\n ((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma\n "
nobs = self.nobs
df_model = self.df_model
return (((nobs + df_model) / (nobs - df_model)) * self.sigma2)
|
Final prediction error using Lütkepohl's definition.
((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
|
statsmodels/tsa/ar_model.py
|
fpe
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def fpe(self):
"\n Final prediction error using Lütkepohl's definition.\n\n ((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma\n "
nobs = self.nobs
df_model = self.df_model
return (((nobs + df_model) / (nobs - df_model)) * self.sigma2)
|
@cache_readonly
def fpe(self):
"\n Final prediction error using Lütkepohl's definition.\n\n ((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma\n "
nobs = self.nobs
df_model = self.df_model
return (((nobs + df_model) / (nobs - df_model)) * self.sigma2)<|docstring|>Final prediction error using Lütkepohl's definition.
((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma<|endoftext|>
|
075e9932f47cf43b1aaadd46349104ccd26b984837399a4e819cba40614beeef
|
@cache_readonly
def bic(self):
'\n Bayes Information Criterion\n\n :math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`\n '
nobs = self.nobs
return (np.log(self.sigma2) + (((1 + self.df_model) * np.log(nobs)) / nobs))
|
Bayes Information Criterion
:math:`\log(\sigma) + (1 + k_ar + k_trend)*\log(nobs)/nobs`
|
statsmodels/tsa/ar_model.py
|
bic
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def bic(self):
'\n Bayes Information Criterion\n\n :math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`\n '
nobs = self.nobs
return (np.log(self.sigma2) + (((1 + self.df_model) * np.log(nobs)) / nobs))
|
@cache_readonly
def bic(self):
'\n Bayes Information Criterion\n\n :math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`\n '
nobs = self.nobs
return (np.log(self.sigma2) + (((1 + self.df_model) * np.log(nobs)) / nobs))<|docstring|>Bayes Information Criterion
:math:`\log(\sigma) + (1 + k_ar + k_trend)*\log(nobs)/nobs`<|endoftext|>
|
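The bic, hqic and fpe definitions above follow the same pattern and differ only in the penalty term; side by side with the same hypothetical quantities:

import numpy as np

sigma2, df_model, nobs = 1.3, 2, 200  # hypothetical fit quantities
bic = np.log(sigma2) + ((1 + df_model) * np.log(nobs)) / nobs
hqic = np.log(sigma2) + ((2 * np.log(np.log(nobs))) / nobs) * (1 + df_model)
fpe = ((nobs + df_model) / (nobs - df_model)) * sigma2
print(bic, hqic, fpe)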
4b4dc1a8a642b4f5b9d6bd286535b5e1ba795e1bd8b13a5f20279cdcf968f7bb
|
@cache_readonly
def resid(self):
"\n The residuals of the model.\n\n If the model is fit by 'mle' then the pre-sample residuals are\n calculated using fittedvalues from the Kalman Filter.\n "
model = self.model
endog = model.endog.squeeze()
if (model.method == 'cmle'):
return (endog[self.k_ar:] - self.fittedvalues)
else:
return (model.endog.squeeze() - self.fittedvalues)
|
The residuals of the model.
If the model is fit by 'mle' then the pre-sample residuals are
calculated using fittedvalues from the Kalman Filter.
|
statsmodels/tsa/ar_model.py
|
resid
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def resid(self):
"\n The residuals of the model.\n\n If the model is fit by 'mle' then the pre-sample residuals are\n calculated using fittedvalues from the Kalman Filter.\n "
model = self.model
endog = model.endog.squeeze()
if (model.method == 'cmle'):
return (endog[self.k_ar:] - self.fittedvalues)
else:
return (model.endog.squeeze() - self.fittedvalues)
|
@cache_readonly
def resid(self):
"\n The residuals of the model.\n\n If the model is fit by 'mle' then the pre-sample residuals are\n calculated using fittedvalues from the Kalman Filter.\n "
model = self.model
endog = model.endog.squeeze()
if (model.method == 'cmle'):
return (endog[self.k_ar:] - self.fittedvalues)
else:
return (model.endog.squeeze() - self.fittedvalues)<|docstring|>The residuals of the model.
If the model is fit by 'mle' then the pre-sample residuals are
calculated using fittedvalues from the Kalman Filter.<|endoftext|>
|
a218c4e7c48558d1f6b1be62023b4cfd5b68348e140b11deeb8b503e172002d2
|
@cache_readonly
def roots(self):
'\n The roots of the AR process.\n\n The roots are the solution to\n (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.\n Stability requires that the roots in modulus lie outside the unit\n circle.\n '
k = self.k_trend
return (np.roots(np.r_[(1, (- self.params[k:]))]) ** (- 1))
|
The roots of the AR process.
The roots are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.
Stability requires that the roots in modulus lie outside the unit
circle.
|
statsmodels/tsa/ar_model.py
|
roots
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def roots(self):
'\n The roots of the AR process.\n\n The roots are the solution to\n (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.\n Stability requires that the roots in modulus lie outside the unit\n circle.\n '
k = self.k_trend
return (np.roots(np.r_[(1, (- self.params[k:]))]) ** (- 1))
|
@cache_readonly
def roots(self):
'\n The roots of the AR process.\n\n The roots are the solution to\n (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.\n Stability requires that the roots in modulus lie outside the unit\n circle.\n '
k = self.k_trend
return (np.roots(np.r_[(1, (- self.params[k:]))]) ** (- 1))<|docstring|>The roots of the AR process.
The roots are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0.
Stability requires that the roots in modulus lie outside the unit
circle.<|endoftext|>
|
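For a stationary AR(1) with coefficient 0.5 the characteristic root is 2, safely outside the unit circle; the same reciprocal-roots computation in isolation (the parameter values are made up):

import numpy as np

params = np.array([0.1, 0.5])    # hypothetical [intercept, AR(1) coefficient]
k_trend = 1
roots = np.roots(np.r_[1, -params[k_trend:]]) ** -1
print(roots, np.abs(roots) > 1)  # [2.] [ True] -> stationary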
31825a2c4fd88c8ac2711e9f4c05a59b5647cfeeee894d9eda81001f8d779205
|
@cache_readonly
def arfreq(self):
'\n Returns the frequency of the AR roots.\n\n This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the\n roots.\n '
z = self.roots
return (np.arctan2(z.imag, z.real) / (2 * np.pi))
|
Returns the frequency of the AR roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
|
statsmodels/tsa/ar_model.py
|
arfreq
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def arfreq(self):
'\n Returns the frequency of the AR roots.\n\n This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the\n roots.\n '
z = self.roots
return (np.arctan2(z.imag, z.real) / (2 * np.pi))
|
@cache_readonly
def arfreq(self):
'\n Returns the frequency of the AR roots.\n\n This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the\n roots.\n '
z = self.roots
return (np.arctan2(z.imag, z.real) / (2 * np.pi))<|docstring|>Returns the frequency of the AR roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.<|endoftext|>
|
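Each root's frequency is its polar angle expressed as a fraction of a full cycle, so positive real roots map to 0 and negative real roots to 0.5; a standalone check with hypothetical roots:

import numpy as np

roots = np.array([2.0 + 0.0j, -1.5 + 0.0j, 1.0 + 1.0j])  # hypothetical roots
freq = np.arctan2(roots.imag, roots.real) / (2 * np.pi)
print(freq)  # [0.0, 0.5, 0.125]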
ef4fee17e987ae6dc7298508693cc445763a8c67aec4a2a8b74da6d549ac8063
|
@cache_readonly
def fittedvalues(self):
'\n The in-sample predicted values of the fitted AR model.\n\n The `k_ar` initial values are computed via the Kalman Filter if the\n model is fit by `mle`.\n '
return self.model.predict(self.params)
|
The in-sample predicted values of the fitted AR model.
The `k_ar` initial values are computed via the Kalman Filter if the
model is fit by `mle`.
|
statsmodels/tsa/ar_model.py
|
fittedvalues
|
raamana/statsmodels
| 6 |
python
|
@cache_readonly
def fittedvalues(self):
'\n The in-sample predicted values of the fitted AR model.\n\n The `k_ar` initial values are computed via the Kalman Filter if the\n model is fit by `mle`.\n '
return self.model.predict(self.params)
|
@cache_readonly
def fittedvalues(self):
'\n The in-sample predicted values of the fitted AR model.\n\n The `k_ar` initial values are computed via the Kalman Filter if the\n model is fit by `mle`.\n '
return self.model.predict(self.params)<|docstring|>The in-sample predicted values of the fitted AR model.
The `k_ar` initial values are computed via the Kalman Filter if the
model is fit by `mle`.<|endoftext|>
|
56df97556bb7f8a5ff87aac4963de115b243f746f23204b9da5ae831c7aecdbf
|
def summary(self, alpha=0.05):
'Summarize the Model\n\n Parameters\n ----------\n alpha : float, optional\n Significance level for the confidence intervals.\n\n Returns\n -------\n smry : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary\n '
model = self.model
title = (model.__class__.__name__ + ' Model Results')
method = model.method
start = (0 if ('mle' in method) else self.k_ar)
if (self.data.dates is not None):
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += [('- ' + dates[(- 1)].strftime('%m-%d-%Y'))]
else:
sample = ((str(start) + ' - ') + str(len(self.data.orig_endog)))
k_ar = self.k_ar
order = '({0})'.format(k_ar)
dep_name = str(self.model.endog_names)
top_left = [('Dep. Variable:', dep_name), ('Model:', [(model.__class__.__name__ + order)]), ('Method:', [method]), ('Date:', None), ('Time:', None), ('Sample:', [sample[0]]), ('', [sample[1]])]
top_right = [('No. Observations:', [str(len(self.model.endog))]), ('Log Likelihood', [('%#5.3f' % self.llf)]), ('S.D. of innovations', [('%#5.3f' % (self.sigma2 ** 0.5))]), ('AIC', [('%#5.3f' % self.aic)]), ('BIC', [('%#5.3f' % self.bic)]), ('HQIC', [('%#5.3f' % self.hqic)])]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
from statsmodels.iolib.table import SimpleTable
if k_ar:
arstubs = [('AR.%d' % i) for i in range(1, (k_ar + 1))]
stubs = arstubs
roots = self.roots
freq = self.arfreq
else:
stubs = []
if len(stubs):
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable([(('%17.4f' % row[0]), ('%+17.4fj' % row[1]), ('%17.4f' % row[2]), ('%17.4f' % row[3])) for row in data], headers=[' Real', ' Imaginary', ' Modulus', ' Frequency'], title='Roots', stubs=stubs)
smry.tables.append(roots_table)
return smry
|
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
|
statsmodels/tsa/ar_model.py
|
summary
|
raamana/statsmodels
| 6 |
python
|
def summary(self, alpha=0.05):
'Summarize the Model\n\n Parameters\n ----------\n alpha : float, optional\n Significance level for the confidence intervals.\n\n Returns\n -------\n smry : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary\n '
model = self.model
title = (model.__class__.__name__ + ' Model Results')
method = model.method
start = (0 if ('mle' in method) else self.k_ar)
if (self.data.dates is not None):
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += [('- ' + dates[(- 1)].strftime('%m-%d-%Y'))]
else:
sample = ((str(start) + ' - ') + str(len(self.data.orig_endog)))
k_ar = self.k_ar
order = '({0})'.format(k_ar)
dep_name = str(self.model.endog_names)
top_left = [('Dep. Variable:', dep_name), ('Model:', [(model.__class__.__name__ + order)]), ('Method:', [method]), ('Date:', None), ('Time:', None), ('Sample:', [sample[0]]), ('', [sample[1]])]
top_right = [('No. Observations:', [str(len(self.model.endog))]), ('Log Likelihood', [('%#5.3f' % self.llf)]), ('S.D. of innovations', [('%#5.3f' % (self.sigma2 ** 0.5))]), ('AIC', [('%#5.3f' % self.aic)]), ('BIC', [('%#5.3f' % self.bic)]), ('HQIC', [('%#5.3f' % self.hqic)])]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
from statsmodels.iolib.table import SimpleTable
if k_ar:
arstubs = [('AR.%d' % i) for i in range(1, (k_ar + 1))]
stubs = arstubs
roots = self.roots
freq = self.arfreq
else:
stubs = []
if len(stubs):
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable([(('%17.4f' % row[0]), ('%+17.4fj' % row[1]), ('%17.4f' % row[2]), ('%17.4f' % row[3])) for row in data], headers=[' Real', ' Imaginary', ' Modulus', ' Frequency'], title='Roots', stubs=stubs)
smry.tables.append(roots_table)
return smry
|
def summary(self, alpha=0.05):
'Summarize the Model\n\n Parameters\n ----------\n alpha : float, optional\n Significance level for the confidence intervals.\n\n Returns\n -------\n smry : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary\n '
model = self.model
title = (model.__class__.__name__ + ' Model Results')
method = model.method
start = (0 if ('mle' in method) else self.k_ar)
if (self.data.dates is not None):
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += [('- ' + dates[(- 1)].strftime('%m-%d-%Y'))]
else:
sample = ((str(start) + ' - ') + str(len(self.data.orig_endog)))
k_ar = self.k_ar
order = '({0})'.format(k_ar)
dep_name = str(self.model.endog_names)
top_left = [('Dep. Variable:', dep_name), ('Model:', [(model.__class__.__name__ + order)]), ('Method:', [method]), ('Date:', None), ('Time:', None), ('Sample:', [sample[0]]), ('', [sample[1]])]
top_right = [('No. Observations:', [str(len(self.model.endog))]), ('Log Likelihood', [('%#5.3f' % self.llf)]), ('S.D. of innovations', [('%#5.3f' % (self.sigma2 ** 0.5))]), ('AIC', [('%#5.3f' % self.aic)]), ('BIC', [('%#5.3f' % self.bic)]), ('HQIC', [('%#5.3f' % self.hqic)])]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
from statsmodels.iolib.table import SimpleTable
if k_ar:
arstubs = [('AR.%d' % i) for i in range(1, (k_ar + 1))]
stubs = arstubs
roots = self.roots
freq = self.arfreq
else:
stubs = []
if len(stubs):
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable([(('%17.4f' % row[0]), ('%+17.4fj' % row[1]), ('%17.4f' % row[2]), ('%17.4f' % row[3])) for row in data], headers=[' Real', ' Imaginary', ' Modulus', ' Frequency'], title='Roots', stubs=stubs)
smry.tables.append(roots_table)
return smry<|docstring|>Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary<|endoftext|>
|
0ec77d99f57c48a9f56422fe845798d9cb8b8197932616017dca478985fee4b6
|
def transform_to_renderer_frame(self, T_view_world):
'\n Args:\n - T_view_world: (batch x 4 x 4) transformation\n in shapenet coordinates (East-Up-South)\n Returns:\n - (batch x 4 x 4) transformation in renderer frame (East-Down-North)\n '
batch_size = T_view_world.size(0)
device = T_view_world.device
self.T_renderer_shapenet = self.T_renderer_shapenet.to(device)
self.T_shapenet_renderer = self.T_shapenet_renderer.to(device)
T_renderer_shapenet = self.T_renderer_shapenet.unsqueeze(0).expand(batch_size, (- 1), (- 1))
T_shapenet_renderer = self.T_shapenet_renderer.unsqueeze(0).expand(batch_size, (- 1), (- 1))
T_view_world = torch.bmm(T_renderer_shapenet, T_view_world)
return T_view_world
|
Args:
- T_view_world: (batch x 4 x 4) transformation
in shapenet coordinates (East-Up-South)
Returns:
- (batch x 4 x 4) transformation in renderer frame (East-Down-North)
|
shapenet/modeling/heads/depth_renderer.py
|
transform_to_renderer_frame
|
rakeshshrestha31/meshmvs
| 6 |
python
|
def transform_to_renderer_frame(self, T_view_world):
'\n Args:\n - T_view_world: (batch x 4 x 4) transformation\n in shapenet coordinates (East-Up-South)\n Returns:\n - (batch x 4 x 4) transformation in renderer frame (East-Down-North)\n '
batch_size = T_view_world.size(0)
device = T_view_world.device
self.T_renderer_shapenet = self.T_renderer_shapenet.to(device)
self.T_shapenet_renderer = self.T_shapenet_renderer.to(device)
T_renderer_shapenet = self.T_renderer_shapenet.unsqueeze(0).expand(batch_size, (- 1), (- 1))
T_shapenet_renderer = self.T_shapenet_renderer.unsqueeze(0).expand(batch_size, (- 1), (- 1))
T_view_world = torch.bmm(T_renderer_shapenet, T_view_world)
return T_view_world
|
def transform_to_renderer_frame(self, T_view_world):
'\n Args:\n - T_view_world: (batch x 4 x 4) transformation\n in shapenet coordinates (East-Up-South)\n Returns:\n - (batch x 4 x 4) transformation in renderer frame (East-Down-North)\n '
batch_size = T_view_world.size(0)
device = T_view_world.device
self.T_renderer_shapenet = self.T_renderer_shapenet.to(device)
self.T_shapenet_renderer = self.T_shapenet_renderer.to(device)
T_renderer_shapenet = self.T_renderer_shapenet.unsqueeze(0).expand(batch_size, (- 1), (- 1))
T_shapenet_renderer = self.T_shapenet_renderer.unsqueeze(0).expand(batch_size, (- 1), (- 1))
T_view_world = torch.bmm(T_renderer_shapenet, T_view_world)
return T_view_world<|docstring|>Args:
- T_view_world: (batch x 4 x 4) transformation
in shapenet coordinates (East-Up-South)
Returns:
- (batch x 4 x 4) transformation in renderer frame (East-Down-North)<|endoftext|>
|
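Converting East-Up-South to East-Down-North amounts to a 180-degree rotation about the x-axis, so `T_renderer_shapenet` is plausibly the constant matrix below; this matrix and the batch size are assumptions for illustration, not values taken from the repository:

import torch

# Assumed frame change: x stays East, y flips Up->Down, z flips South->North
T_renderer_shapenet = torch.tensor([
    [1.0,  0.0,  0.0, 0.0],
    [0.0, -1.0,  0.0, 0.0],
    [0.0,  0.0, -1.0, 0.0],
    [0.0,  0.0,  0.0, 1.0],
])

T_view_world = torch.eye(4).unsqueeze(0)  # hypothetical batch of one pose
batched = T_renderer_shapenet.unsqueeze(0).expand(1, -1, -1)
print(torch.bmm(batched, T_view_world))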
ad39c7ef3fca6fc1128c4e2c1cc0af2ed9ecee6fd0ab060e03f947b78bfc2d4a
|
def forward(self, coords, faces, extrinsics, image_shape):
'\n Multi-view rendering\n Args:\n - pred_coords: (batch x vertices x 3) tensor\n - faces: (batch x faces x 3) tensor\n - image_shape: shape of the depth image to be rendered\n - extrinsics: (batch x view x 2 x 4 x 4) tensor\n Returns:\n - depth tensor batch x view x height x width\n '
batch_size = extrinsics.size(0)
num_views = extrinsics.size(1)
coords_augmented = coords.unsqueeze(1).expand((- 1), num_views, (- 1), (- 1)).contiguous()
faces_augmented = faces.unsqueeze(1).expand((- 1), num_views, (- 1), (- 1)).contiguous()
depth_flattened = self.render_depth(_flatten_batch_view(coords_augmented), _flatten_batch_view(faces_augmented), _flatten_batch_view(extrinsics), image_shape)
return _unflatten_batch_view(depth_flattened, batch_size)
|
Multi-view rendering
Args:
- pred_coords: (batch x vertices x 3) tensor
- faces: (batch x faces x 3) tensor
- image_shape: shape of the depth image to be rendered
- extrinsics: (batch x view x 2 x 4 x 4) tensor
Returns:
- depth tensor batch x view x height x width
|
shapenet/modeling/heads/depth_renderer.py
|
forward
|
rakeshshrestha31/meshmvs
| 6 |
python
|
def forward(self, coords, faces, extrinsics, image_shape):
'\n Multi-view rendering\n Args:\n - pred_coords: (batch x vertices x 3) tensor\n - faces: (batch x faces x 3) tensor\n - image_shape: shape of the depth image to be rendered\n - extrinsics: (batch x view x 2 x 4 x 4) tensor\n Returns:\n - depth tensor batch x view x height x width\n '
batch_size = extrinsics.size(0)
num_views = extrinsics.size(1)
coords_augmented = coords.unsqueeze(1).expand((- 1), num_views, (- 1), (- 1)).contiguous()
faces_augmented = faces.unsqueeze(1).expand((- 1), num_views, (- 1), (- 1)).contiguous()
depth_flattened = self.render_depth(_flatten_batch_view(coords_augmented), _flatten_batch_view(faces_augmented), _flatten_batch_view(extrinsics), image_shape)
return _unflatten_batch_view(depth_flattened, batch_size)
|
def forward(self, coords, faces, extrinsics, image_shape):
'\n Multi-view rendering\n Args:\n - pred_coords: (batch x vertices x 3) tensor\n - faces: (batch x faces x 3) tensor\n - image_shape: shape of the depth image to be rendered\n - extrinsics: (batch x view x 2 x 4 x 4) tensor\n Returns:\n - depth tensor batch x view x height x width\n '
batch_size = extrinsics.size(0)
num_views = extrinsics.size(1)
coords_augmented = coords.unsqueeze(1).expand((- 1), num_views, (- 1), (- 1)).contiguous()
faces_augmented = faces.unsqueeze(1).expand((- 1), num_views, (- 1), (- 1)).contiguous()
depth_flattened = self.render_depth(_flatten_batch_view(coords_augmented), _flatten_batch_view(faces_augmented), _flatten_batch_view(extrinsics), image_shape)
return _unflatten_batch_view(depth_flattened, batch_size)<|docstring|>Multi-view rendering
Args:
- pred_coords: (batch x vertices x 3) tensor
- faces: (batch x faces x 3) tensor
- image_shape: shape of the depth image to be rendered
- extrinsics: (batch x view x 2 x 4 x 4) tensor
Returns:
- depth tensor batch x view x height x width<|endoftext|>
|
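`_flatten_batch_view` and `_unflatten_batch_view` are not included in this record; a plausible sketch of the two helpers, which merge the batch and view dimensions before rendering and split them again afterwards:

import torch

def _flatten_batch_view(tensor):
    # (batch, view, ...) -> (batch * view, ...)
    return tensor.view(-1, *tensor.shape[2:])

def _unflatten_batch_view(tensor, batch_size):
    # (batch * view, ...) -> (batch, view, ...)
    return tensor.view(batch_size, -1, *tensor.shape[1:])

x = torch.zeros(2, 3, 4, 4)
assert _unflatten_batch_view(_flatten_batch_view(x), 2).shape == (2, 3, 4, 4)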
f8a74ef9f7cd3756d612dc0b4ed2373760f3e648aa12fd56d393481c2dab8bd0
|
def render_depth(self, coords, faces, T_view_world, image_shape):
'\n renders a batch of depths\n Args:\n - pred_coords: (batch x vertices x 3) tensor\n - faces: (batch x faces x 3) tensor\n - image_shape shape of the depth image to be rendered\n - T_view_world: (batch x 4 x 4) transformation\n in shapenet coordinates (EUS)\n Returns:\n - depth tensors of shape (batch x h x w)\n '
image_size = image_shape.max()
self.renderer.image_size = image_size
(batch_size, num_points) = coords.size()[:2]
device = coords.device
self.camera_k = self.camera_k.to(device)
self.dist_coeffs = self.dist_coeffs.to(device)
faces = faces.type(torch.int32).to(device)
dist_coeffs = self.dist_coeffs.unsqueeze(0).expand(batch_size, (- 1))
T_view_world = self.transform_to_renderer_frame(T_view_world)
R = T_view_world[:, :3, :3]
t = T_view_world[:, :3, 3].unsqueeze(1)
depth = self.renderer(vertices=coords, faces=faces, mode='depth', K=self.camera_k.unsqueeze(0), dist_coeffs=dist_coeffs, R=R, t=t, orig_size=image_size)
depth[(depth <= self.renderer.near)] = 0
depth[(depth >= self.renderer.far)] = 0
return depth
|
renders a batch of depths
Args:
- pred_coords: (batch x vertices x 3) tensor
- faces: (batch x faces x 3) tensor
- image_shape shape of the depth image to be rendered
- T_view_world: (batch x 4 x 4) transformation
in shapenet coordinates (EUS)
Returns:
- depth tensors of shape (batch x h x w)
|
shapenet/modeling/heads/depth_renderer.py
|
render_depth
|
rakeshshrestha31/meshmvs
| 6 |
python
|
def render_depth(self, coords, faces, T_view_world, image_shape):
'\n renders a batch of depths\n Args:\n - pred_coords: (batch x vertices x 3) tensor\n - faces: (batch x faces x 3) tensor\n - image_shape shape of the depth image to be rendered\n - T_view_world: (batch x 4 x 4) transformation\n in shapenet coordinates (EUS)\n Returns:\n - depth tensors of shape (batch x h x w)\n '
image_size = image_shape.max()
self.renderer.image_size = image_size
(batch_size, num_points) = coords.size()[:2]
device = coords.device
self.camera_k = self.camera_k.to(device)
self.dist_coeffs = self.dist_coeffs.to(device)
faces = faces.type(torch.int32).to(device)
dist_coeffs = self.dist_coeffs.unsqueeze(0).expand(batch_size, (- 1))
T_view_world = self.transform_to_renderer_frame(T_view_world)
R = T_view_world[:, :3, :3]
t = T_view_world[:, :3, 3].unsqueeze(1)
depth = self.renderer(vertices=coords, faces=faces, mode='depth', K=self.camera_k.unsqueeze(0), dist_coeffs=dist_coeffs, R=R, t=t, orig_size=image_size)
depth[(depth <= self.renderer.near)] = 0
depth[(depth >= self.renderer.far)] = 0
return depth
|
def render_depth(self, coords, faces, T_view_world, image_shape):
'\n renders a batch of depths\n Args:\n - pred_coords: (batch x vertices x 3) tensor\n - faces: (batch x faces x 3) tensor\n - image_shape shape of the depth image to be rendered\n - T_view_world: (batch x 4 x 4) transformation\n in shapenet coordinates (EUS)\n Returns:\n - depth tensors of shape (batch x h x w)\n '
image_size = image_shape.max()
self.renderer.image_size = image_size
(batch_size, num_points) = coords.size()[:2]
device = coords.device
self.camera_k = self.camera_k.to(device)
self.dist_coeffs = self.dist_coeffs.to(device)
faces = faces.type(torch.int32).to(device)
dist_coeffs = self.dist_coeffs.unsqueeze(0).expand(batch_size, (- 1))
T_view_world = self.transform_to_renderer_frame(T_view_world)
R = T_view_world[:, :3, :3]
t = T_view_world[:, :3, 3].unsqueeze(1)
depth = self.renderer(vertices=coords, faces=faces, mode='depth', K=self.camera_k.unsqueeze(0), dist_coeffs=dist_coeffs, R=R, t=t, orig_size=image_size)
depth[(depth <= self.renderer.near)] = 0
depth[(depth >= self.renderer.far)] = 0
return depth<|docstring|>renders a batch of depths
Args:
- pred_coords: (batch x vertices x 3) tensor
- faces: (batch x faces x 3) tensor
- image_shape shape of the depth image to be rendered
- T_view_world: (batch x 4 x 4) transformation
in shapenet coordinates (EUS)
Returns:
- depth tensors of shape (batch x h x w)<|endoftext|>
|
51be86d16eb1be5fd2f62b4de5f70289d4017b0107668b8410821a7580e3ad4c
|
def test_erc_681_url_for_L1():
'Test for both native asset and token'
erc681_url = helpers.make_erc_681_url('0xtest1', '10')
assert (erc681_url == 'ethereum:0xtest1?value=10')
erc681_url = helpers.make_erc_681_url('0xtest1', '10', is_token=True, token_address='0xtoken')
assert (erc681_url == 'ethereum:0xtoken/transfer?address=0xtest1&uint256=10')
|
Test for both native asset and token
|
tests/tokens/test_token_methods.py
|
test_erc_681_url_for_L1
|
mikulas-mrva/pretix-eth-payment-plugin
| 1 |
python
|
def test_erc_681_url_for_L1():
erc681_url = helpers.make_erc_681_url('0xtest1', '10')
assert (erc681_url == 'ethereum:0xtest1?value=10')
erc681_url = helpers.make_erc_681_url('0xtest1', '10', is_token=True, token_address='0xtoken')
assert (erc681_url == 'ethereum:0xtoken/transfer?address=0xtest1&uint256=10')
|
def test_erc_681_url_for_L1():
erc681_url = helpers.make_erc_681_url('0xtest1', '10')
assert (erc681_url == 'ethereum:0xtest1?value=10')
erc681_url = helpers.make_erc_681_url('0xtest1', '10', is_token=True, token_address='0xtoken')
assert (erc681_url == 'ethereum:0xtoken/transfer?address=0xtest1&uint256=10')<|docstring|>Test for both native asset and token<|endoftext|>
|
7dd5e5a8f3d27e416ccb94fbac0af9a31089ccdcf01ce0b1a7544d8f27059ff5
|
def test_make_erc_681_url_for_L2():
'Test for both native asset and token'
erc681_url = helpers.make_erc_681_url('0xtest1', '10', chain_id=3)
assert (erc681_url == 'ethereum:0xtest1@3?value=10')
erc681_url = helpers.make_erc_681_url('0xtest1', '10', chain_id=3, is_token=True, token_address='0xtoken')
assert (erc681_url == 'ethereum:0xtoken@3/transfer?address=0xtest1&uint256=10')
|
Test for both native asset and token
|
tests/tokens/test_token_methods.py
|
test_make_erc_681_url_for_L2
|
mikulas-mrva/pretix-eth-payment-plugin
| 1 |
python
|
def test_make_erc_681_url_for_L2():
erc681_url = helpers.make_erc_681_url('0xtest1', '10', chain_id=3)
assert (erc681_url == 'ethereum:0xtest1@3?value=10')
erc681_url = helpers.make_erc_681_url('0xtest1', '10', chain_id=3, is_token=True, token_address='0xtoken')
assert (erc681_url == 'ethereum:0xtoken@3/transfer?address=0xtest1&uint256=10')
|
def test_make_erc_681_url_for_L2():
erc681_url = helpers.make_erc_681_url('0xtest1', '10', chain_id=3)
assert (erc681_url == 'ethereum:0xtest1@3?value=10')
erc681_url = helpers.make_erc_681_url('0xtest1', '10', chain_id=3, is_token=True, token_address='0xtoken')
assert (erc681_url == 'ethereum:0xtoken@3/transfer?address=0xtest1&uint256=10')<|docstring|>Test for both native asset and token<|endoftext|>
|
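A minimal implementation consistent with both tests above would look like this; it is a sketch inferred from the expected URLs, not the plugin's actual helper:

def make_erc_681_url(to_address, amount, chain_id=1, is_token=False, token_address=None):
    # EIP-681 payment request URLs; the chain id suffix is omitted for mainnet (1)
    chain = '' if chain_id == 1 else '@{}'.format(chain_id)
    if is_token:
        if token_address is None:
            raise ValueError('token_address is required when is_token=True')
        return 'ethereum:{}{}/transfer?address={}&uint256={}'.format(
            token_address, chain, to_address, amount)
    return 'ethereum:{}{}?value={}'.format(to_address, chain, amount)

assert make_erc_681_url('0xtest1', '10') == 'ethereum:0xtest1?value=10'
assert make_erc_681_url('0xtest1', '10', chain_id=3) == 'ethereum:0xtest1@3?value=10'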
d297cf6b7a014515990cd7325834f1d95bb0540b648a12322d5a25c8e04bdbac
|
def create_jwt(project_id, private_key_file, algorithm):
"Creates a JWT (https://jwt.io) to establish an MQTT connection.\n Args:\n project_id: The cloud project ID this device belongs to\n private_key_file: A path to a file containing either an RSA256 or\n ES256 private key.\n algorithm: The encryption algorithm to use. Either 'RS256' or 'ES256'\n Returns:\n A JWT generated from the given project_id and private key, which\n expires in 20 minutes. After 20 minutes, your client will be\n disconnected, and a new JWT will have to be generated.\n Raises:\n ValueError: If the private_key_file does not contain a known key.\n "
token = {'iat': datetime.datetime.utcnow(), 'exp': (datetime.datetime.utcnow() + datetime.timedelta(minutes=20)), 'aud': project_id}
with open(private_key_file, 'r') as f:
private_key = f.read()
print('Creating JWT using {} from private key file {}'.format(algorithm, private_key_file))
return jwt.encode(token, private_key, algorithm=algorithm)
|
Creates a JWT (https://jwt.io) to establish an MQTT connection.
Args:
project_id: The cloud project ID this device belongs to
private_key_file: A path to a file containing either an RSA256 or
ES256 private key.
algorithm: The encryption algorithm to use. Either 'RS256' or 'ES256'
Returns:
A JWT generated from the given project_id and private key, which
expires in 20 minutes. After 20 minutes, your client will be
disconnected, and a new JWT will have to be generated.
Raises:
ValueError: If the private_key_file does not contain a known key.
|
pyclient/gcpIoTclient.py
|
create_jwt
|
lkk688/IoTCloudConnect
| 1 |
python
|
def create_jwt(project_id, private_key_file, algorithm):
"Creates a JWT (https://jwt.io) to establish an MQTT connection.\n Args:\n project_id: The cloud project ID this device belongs to\n private_key_file: A path to a file containing either an RSA256 or\n ES256 private key.\n algorithm: The encryption algorithm to use. Either 'RS256' or 'ES256'\n Returns:\n A JWT generated from the given project_id and private key, which\n expires in 20 minutes. After 20 minutes, your client will be\n disconnected, and a new JWT will have to be generated.\n Raises:\n ValueError: If the private_key_file does not contain a known key.\n "
token = {'iat': datetime.datetime.utcnow(), 'exp': (datetime.datetime.utcnow() + datetime.timedelta(minutes=20)), 'aud': project_id}
with open(private_key_file, 'r') as f:
private_key = f.read()
print('Creating JWT using {} from private key file {}'.format(algorithm, private_key_file))
return jwt.encode(token, private_key, algorithm=algorithm)
|
def create_jwt(project_id, private_key_file, algorithm):
"Creates a JWT (https://jwt.io) to establish an MQTT connection.\n Args:\n project_id: The cloud project ID this device belongs to\n private_key_file: A path to a file containing either an RSA256 or\n ES256 private key.\n algorithm: The encryption algorithm to use. Either 'RS256' or 'ES256'\n Returns:\n A JWT generated from the given project_id and private key, which\n expires in 20 minutes. After 20 minutes, your client will be\n disconnected, and a new JWT will have to be generated.\n Raises:\n ValueError: If the private_key_file does not contain a known key.\n "
token = {'iat': datetime.datetime.utcnow(), 'exp': (datetime.datetime.utcnow() + datetime.timedelta(minutes=20)), 'aud': project_id}
with open(private_key_file, 'r') as f:
private_key = f.read()
print('Creating JWT using {} from private key file {}'.format(algorithm, private_key_file))
return jwt.encode(token, private_key, algorithm=algorithm)<|docstring|>Creates a JWT (https://jwt.io) to establish an MQTT connection.
Args:
project_id: The cloud project ID this device belongs to
private_key_file: A path to a file containing either an RSA256 or
ES256 private key.
algorithm: The encryption algorithm to use. Either 'RS256' or 'ES256'
Returns:
A JWT generated from the given project_id and private key, which
expires in 20 minutes. After 20 minutes, your client will be
disconnected, and a new JWT will have to be generated.
Raises:
ValueError: If the private_key_file does not contain a known key.<|endoftext|>
|
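Usage sketch; the project id and key path are placeholders, and note that PyJWT >= 2 returns a str from jwt.encode while 1.x returned bytes:

import jwt  # PyJWT

token = create_jwt('my-project', 'rsa_private.pem', 'RS256')     # hypothetical inputs
claims = jwt.decode(token, options={'verify_signature': False})  # PyJWT 2.x API
print(claims['aud'], claims['exp'])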
dc16172cdb1634aff3b530eb77a6348faaa93076e71b14bbfcabf9bb1e387cce
|
def error_str(rc):
'Convert a Paho error to a human readable string.'
return '{}: {}'.format(rc, mqtt.error_string(rc))
|
Convert a Paho error to a human readable string.
|
pyclient/gcpIoTclient.py
|
error_str
|
lkk688/IoTCloudConnect
| 1 |
python
|
def error_str(rc):
return '{}: {}'.format(rc, mqtt.error_string(rc))
|
def error_str(rc):
return '{}: {}'.format(rc, mqtt.error_string(rc))<|docstring|>Convert a Paho error to a human readable string.<|endoftext|>
|
5b4ec1675b10709c94ac987ed69a5cffef826fdc49d83ed69067779417feb3e9
|
def on_connect(unused_client, unused_userdata, unused_flags, rc):
'Callback for when a device connects.'
print('on_connect', mqtt.connack_string(rc))
global should_backoff
global minimum_backoff_time
should_backoff = False
minimum_backoff_time = 1
|
Callback for when a device connects.
|
pyclient/gcpIoTclient.py
|
on_connect
|
lkk688/IoTCloudConnect
| 1 |
python
|
def on_connect(unused_client, unused_userdata, unused_flags, rc):
print('on_connect', mqtt.connack_string(rc))
global should_backoff
global minimum_backoff_time
should_backoff = False
minimum_backoff_time = 1
|
def on_connect(unused_client, unused_userdata, unused_flags, rc):
print('on_connect', mqtt.connack_string(rc))
global should_backoff
global minimum_backoff_time
should_backoff = False
minimum_backoff_time = 1<|docstring|>Callback for when a device connects.<|endoftext|>
|
69f5df89897bf796bb1d635895fbad794602293078d341c2cf882cef0de310b6
|
def on_disconnect(unused_client, unused_userdata, rc):
'Paho callback for when a device disconnects.'
print('on_disconnect', error_str(rc))
global should_backoff
should_backoff = True
|
Paho callback for when a device disconnects.
|
pyclient/gcpIoTclient.py
|
on_disconnect
|
lkk688/IoTCloudConnect
| 1 |
python
|
def on_disconnect(unused_client, unused_userdata, rc):
print('on_disconnect', error_str(rc))
global should_backoff
should_backoff = True
|
def on_disconnect(unused_client, unused_userdata, rc):
print('on_disconnect', error_str(rc))
global should_backoff
should_backoff = True<|docstring|>Paho callback for when a device disconnects.<|endoftext|>
|
f51bbb89ca14a76eb00d0f6d8aca8f5bab66f724adfbce4f45fb00f5a8f4f89d
|
def on_publish(unused_client, unused_userdata, unused_mid):
'Paho callback when a message is sent to the broker.'
print('on_publish')
|
Paho callback when a message is sent to the broker.
|
pyclient/gcpIoTclient.py
|
on_publish
|
lkk688/IoTCloudConnect
| 1 |
python
|
def on_publish(unused_client, unused_userdata, unused_mid):
print('on_publish')
|
def on_publish(unused_client, unused_userdata, unused_mid):
print('on_publish')<|docstring|>Paho callback when a message is sent to the broker.<|endoftext|>
|
53b0c26c1d7f446544711b415dc903ed43a8b0b671ac942d1590136c7957be43
|
def on_message(unused_client, unused_userdata, message):
'Callback when the device receives a message on a subscription.'
payload = str(message.payload.decode('utf-8'))
print("Received message '{}' on topic '{}' with Qos {}".format(payload, message.topic, str(message.qos)))
|
Callback when the device receives a message on a subscription.
|
pyclient/gcpIoTclient.py
|
on_message
|
lkk688/IoTCloudConnect
| 1 |
python
|
def on_message(unused_client, unused_userdata, message):
payload = str(message.payload.decode('utf-8'))
print("Received message '{}' on topic '{}' with Qos {}".format(payload, message.topic, str(message.qos)))
|
def on_message(unused_client, unused_userdata, message):
payload = str(message.payload.decode('utf-8'))
print("Received message '{}' on topic '{}' with Qos {}".format(payload, message.topic, str(message.qos)))<|docstring|>Callback when the device receives a message on a subscription.<|endoftext|>
|
1321eff056db6d54cbffb2f9d43828cea936fe81ec2dd679612d12c061fb2a6f
|
def get_client(project_id, cloud_region, registry_id, device_id, private_key_file, algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port):
'Create our MQTT client. The client_id is a unique string that identifies\n this device. For Google Cloud IoT Core, it must be in the format below.'
client_id = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(project_id, cloud_region, registry_id, device_id)
print("Device client_id is '{}'".format(client_id))
client = mqtt.Client(client_id=client_id)
client.username_pw_set(username='unused', password=create_jwt(project_id, private_key_file, algorithm))
client.tls_set(ca_certs=ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
client.on_connect = on_connect
client.on_publish = on_publish
client.on_disconnect = on_disconnect
client.on_message = on_message
client.connect(mqtt_bridge_hostname, mqtt_bridge_port)
mqtt_config_topic = '/devices/{}/config'.format(device_id)
client.subscribe(mqtt_config_topic, qos=1)
mqtt_command_topic = '/devices/{}/commands/#'.format(device_id)
print('Subscribing to {}'.format(mqtt_command_topic))
client.subscribe(mqtt_command_topic, qos=0)
return client
|
Create our MQTT client. The client_id is a unique string that identifies
this device. For Google Cloud IoT Core, it must be in the format below.
|
pyclient/gcpIoTclient.py
|
get_client
|
lkk688/IoTCloudConnect
| 1 |
python
|
def get_client(project_id, cloud_region, registry_id, device_id, private_key_file, algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port):
'Create our MQTT client. The client_id is a unique string that identifies\n this device. For Google Cloud IoT Core, it must be in the format below.'
client_id = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(project_id, cloud_region, registry_id, device_id)
print("Device client_id is '{}'".format(client_id))
client = mqtt.Client(client_id=client_id)
client.username_pw_set(username='unused', password=create_jwt(project_id, private_key_file, algorithm))
client.tls_set(ca_certs=ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
client.on_connect = on_connect
client.on_publish = on_publish
client.on_disconnect = on_disconnect
client.on_message = on_message
client.connect(mqtt_bridge_hostname, mqtt_bridge_port)
mqtt_config_topic = '/devices/{}/config'.format(device_id)
client.subscribe(mqtt_config_topic, qos=1)
mqtt_command_topic = '/devices/{}/commands/#'.format(device_id)
print('Subscribing to {}'.format(mqtt_command_topic))
client.subscribe(mqtt_command_topic, qos=0)
return client
|
def get_client(project_id, cloud_region, registry_id, device_id, private_key_file, algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port):
'Create our MQTT client. The client_id is a unique string that identifies\n this device. For Google Cloud IoT Core, it must be in the format below.'
client_id = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(project_id, cloud_region, registry_id, device_id)
print("Device client_id is '{}'".format(client_id))
client = mqtt.Client(client_id=client_id)
client.username_pw_set(username='unused', password=create_jwt(project_id, private_key_file, algorithm))
client.tls_set(ca_certs=ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
client.on_connect = on_connect
client.on_publish = on_publish
client.on_disconnect = on_disconnect
client.on_message = on_message
client.connect(mqtt_bridge_hostname, mqtt_bridge_port)
mqtt_config_topic = '/devices/{}/config'.format(device_id)
client.subscribe(mqtt_config_topic, qos=1)
mqtt_command_topic = '/devices/{}/commands/#'.format(device_id)
print('Subscribing to {}'.format(mqtt_command_topic))
client.subscribe(mqtt_command_topic, qos=0)
return client<|docstring|>Create our MQTT client. The client_id is a unique string that identifies
this device. For Google Cloud IoT Core, it must be in the format below.<|endoftext|>
|
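Putting the pieces together, an invocation sketch with placeholder values (the Google CA bundle is conventionally downloaded as roots.pem):

client = get_client(
    project_id='my-project',             # all values below are placeholders
    cloud_region='us-central1',
    registry_id='my-registry',
    device_id='my-device',
    private_key_file='rsa_private.pem',
    algorithm='RS256',
    ca_certs='roots.pem',
    mqtt_bridge_hostname='mqtt.googleapis.com',
    mqtt_bridge_port=8883,
)
client.loop_start()  # process network traffic on a background thread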
30e17cdb431112918d624a5cc72fe39e467aa68a06393e1f73832b41926652dd
|
def mqtt_device_demo(args):
'Connects a device, sends data, and receives data.'
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
for i in range(1, (args.num_messages + 1)):
client.loop()
if should_backoff:
if (minimum_backoff_time > MAXIMUM_BACKOFF_TIME):
print('Exceeded maximum backoff time. Giving up.')
break
delay = (minimum_backoff_time + (random.randint(0, 1000) / 1000.0))
print('Waiting for {} before reconnecting.'.format(delay))
time.sleep(delay)
minimum_backoff_time *= 2
client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
payload = '{}/{}-payload-{}'.format(args.registry_id, args.device_id, i)
print("Publishing message {}/{}: '{}'".format(i, args.num_messages, payload))
seconds_since_issue = (datetime.datetime.utcnow() - jwt_iat).seconds
if (seconds_since_issue > (60 * jwt_exp_mins)):
print('Refreshing token after {}s'.format(seconds_since_issue))
jwt_iat = datetime.datetime.utcnow()
client.loop()
client.disconnect()
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
client.publish(mqtt_topic, payload, qos=1)
time.sleep(1)
|
Connects a device, sends data, and receives data.
|
pyclient/gcpIoTclient.py
|
mqtt_device_demo
|
lkk688/IoTCloudConnect
| 1 |
python
|
def mqtt_device_demo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
for i in range(1, (args.num_messages + 1)):
client.loop()
if should_backoff:
if (minimum_backoff_time > MAXIMUM_BACKOFF_TIME):
print('Exceeded maximum backoff time. Giving up.')
break
delay = (minimum_backoff_time + (random.randint(0, 1000) / 1000.0))
print('Waiting for {} before reconnecting.'.format(delay))
time.sleep(delay)
minimum_backoff_time *= 2
client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
payload = '{}/{}-payload-{}'.format(args.registry_id, args.device_id, i)
print("Publishing message {}/{}: '{}'".format(i, args.num_messages, payload))
seconds_since_issue = (datetime.datetime.utcnow() - jwt_iat).seconds
if (seconds_since_issue > (60 * jwt_exp_mins)):
print('Refreshing token after {}s'.format(seconds_since_issue))
jwt_iat = datetime.datetime.utcnow()
client.loop()
client.disconnect()
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
client.publish(mqtt_topic, payload, qos=1)
time.sleep(1)
|
def mqtt_device_demo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
for i in range(1, (args.num_messages + 1)):
client.loop()
if should_backoff:
if (minimum_backoff_time > MAXIMUM_BACKOFF_TIME):
print('Exceeded maximum backoff time. Giving up.')
break
delay = (minimum_backoff_time + (random.randint(0, 1000) / 1000.0))
print('Waiting for {} before reconnecting.'.format(delay))
time.sleep(delay)
minimum_backoff_time *= 2
client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
payload = '{}/{}-payload-{}'.format(args.registry_id, args.device_id, i)
print("Publishing message {}/{}: '{}'".format(i, args.num_messages, payload))
seconds_since_issue = (datetime.datetime.utcnow() - jwt_iat).seconds
if (seconds_since_issue > (60 * jwt_exp_mins)):
print('Refreshing token after {}s'.format(seconds_since_issue))
jwt_iat = datetime.datetime.utcnow()
client.loop()
client.disconnect()
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
client.publish(mqtt_topic, payload, qos=1)
time.sleep(1)<|docstring|>Connects a device, sends data, and receives data.<|endoftext|>
|
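The reconnect policy embedded in the loop above is truncated exponential backoff with up to one second of random jitter; isolated as a sketch:

import random

def backoff_delays(minimum=1, maximum=32):
    delay = minimum
    while delay <= maximum:
        yield delay + (random.randint(0, 1000) / 1000.0)  # add jitter
        delay *= 2  # double the base delay after every failed attempt

for delay in backoff_delays():
    print('would wait {:.3f}s before reconnecting'.format(delay))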
f52179646135c561b97cecb00462d9f83b29caf4ba3633e57cb6a7d556853eda
|
def storage_mqtt_device_demo(args):
'Connects a device, sends data, and receives data.'
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
storage_client = storage.Client.from_service_account_json(args.service_account_json)
bucketexist = storage_client.bucket('cmpelkk_imagetest')
i = 0
path = args.imagefolder_path
for filename in os.listdir(path):
if filename.endswith('.jpg'):
print(filename)
i += 1
bucketfilename = ('img%s.jpg' % i)
print(bucketfilename)
blobexist = bucketexist.blob(bucketfilename)
filepathlocal = os.path.join(path, filename)
print(filepathlocal)
blobexist.upload_from_filename(filepathlocal)
client.loop()
currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
(temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
print("Publishing message {}/: '{}'".format(i, payloadJSON))
client.publish(mqtt_topic, payloadJSON, qos=1)
time.sleep(1)
continue
else:
continue
|
Connects a device, sends data, and receives data.
|
pyclient/gcpIoTclient.py
|
storage_mqtt_device_demo
|
lkk688/IoTCloudConnect
| 1 |
python
|
def storage_mqtt_device_demo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
storage_client = storage.Client.from_service_account_json(args.service_account_json)
bucketexist = storage_client.bucket('cmpelkk_imagetest')
i = 0
path = args.imagefolder_path
for filename in os.listdir(path):
if filename.endswith('.jpg'):
print(filename)
i += 1
bucketfilename = ('img%s.jpg' % i)
print(bucketfilename)
blobexist = bucketexist.blob(bucketfilename)
filepathlocal = os.path.join(path, filename)
print(filepathlocal)
blobexist.upload_from_filename(filepathlocal)
client.loop()
currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
(temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
print("Publishing message {}/: '{}'".format(i, payloadJSON))
client.publish(mqtt_topic, payloadJSON, qos=1)
time.sleep(1)
continue
else:
continue
|
def storage_mqtt_device_demo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
storage_client = storage.Client.from_service_account_json(args.service_account_json)
bucketexist = storage_client.bucket('cmpelkk_imagetest')
i = 0
path = args.imagefolder_path
for filename in os.listdir(path):
if filename.endswith('.jpg'):
print(filename)
i += 1
bucketfilename = ('img%s.jpg' % i)
print(bucketfilename)
blobexist = bucketexist.blob(bucketfilename)
filepathlocal = os.path.join(path, filename)
print(filepathlocal)
blobexist.upload_from_filename(filepathlocal)
client.loop()
currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
(temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
print("Publishing message {}/: '{}'".format(i, payloadJSON))
client.publish(mqtt_topic, payloadJSON, qos=1)
time.sleep(1)
continue
else:
continue<|docstring|>Connects a device, sends data, and receives data.<|endoftext|>
|
2e435e2f59461619f40b08bc6afb4c44a7b589c4e3d5dcf1ac6dcd0254aa9b25
|
def bigquery_mqtt_device_demo(args):
'Connects a device, sends data, and receives data.'
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
i = 0
for i in range(1, (args.num_messages + 1)):
bucketfilename = ('img%s.jpg' % i)
client.loop()
currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
(temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
print("Publishing message {}/: '{}'".format(i, payloadJSON))
client.publish(mqtt_topic, payloadJSON, qos=1)
time.sleep(0.5)
|
Connects a device, sends data, and receives data.
|
pyclient/gcpIoTclient.py
|
bigquery_mqtt_device_demo
|
lkk688/IoTCloudConnect
| 1 |
python
|
def bigquery_mqtt_device_demo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
i = 0
for i in range(1, (args.num_messages + 1)):
bucketfilename = ('img%s.jpg' % i)
client.loop()
currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
(temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
print("Publishing message {}/: '{}'".format(i, payloadJSON))
client.publish(mqtt_topic, payloadJSON, qos=1)
time.sleep(0.5)
|
def bigquery_mqtt_device_demo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
i = 0
for i in range(1, (args.num_messages + 1)):
bucketfilename = ('img%s.jpg' % i)
client.loop()
currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
(temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
print("Publishing message {}/: '{}'".format(i, payloadJSON))
client.publish(mqtt_topic, payloadJSON, qos=1)
time.sleep(0.5)<|docstring|>Connects a device, sends data, and receives data.<|endoftext|>
|
49309f3cf6ecf0a6ae2cbe59bf7cb917c7d85b5e53422d9c43de668056ce4595
|
def mqtt_device_subdemo(args):
'Connects a device, sends data, and receives data.'
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
i = 0
for i in range(1, (args.num_messages + 1)):
client.loop()
time.sleep(1)
|
Connects a device, sends data, and receives data.
|
pyclient/gcpIoTclient.py
|
mqtt_device_subdemo
|
lkk688/IoTCloudConnect
| 1 |
python
|
def mqtt_device_subdemo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
i = 0
for i in range(1, (args.num_messages + 1)):
client.loop()
time.sleep(1)
|
def mqtt_device_subdemo(args):
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
sub_topic = ('events' if (args.message_type == 'event') else 'state')
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(args.project_id, args.cloud_region, args.registry_id, args.device_id, args.private_key_file, args.algorithm, args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
i = 0
for i in range(1, (args.num_messages + 1)):
client.loop()
time.sleep(1)<|docstring|>Connects a device, sends data, and receives data.<|endoftext|>
|