# coding: utf-8
"""
Stitch Connect
https://www.stitchdata.com/docs/developers/stitch-connect/api # noqa: E501
The version of the OpenAPI document: 0.4.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from stitch_connect_client.configuration import Configuration
class StreamLevelMetadata(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"database_name": "str",
"forced_replication_method": "ForcedReplicationMethod",
"is_view": "bool",
"replication_key": "str",
"replication_method": "str",
"row_count": "int",
"schema_name": "str",
"selected": "bool",
"table_key_properties": "list[str]",
"valid_replication_keys": "list[str]",
"view_key_properties": "list[str]",
"tap_google_analytics_all_cubes": "list[str]",
}
attribute_map = {
"database_name": "database-name",
"forced_replication_method": "forced-replication-method",
"is_view": "is-view",
"replication_key": "replication-key",
"replication_method": "replication-method",
"row_count": "row-count",
"schema_name": "schema-name",
"selected": "selected",
"table_key_properties": "table-key-properties",
"valid_replication_keys": "valid-replication-keys",
"view_key_properties": "view-key-properties",
"tap_google_analytics_all_cubes": "tap_google_analytics.all_cubes",
}
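# A minimal usage sketch (illustrative values, not part of the generated client):
# attribute_map translates the snake_case Python attribute names above into the
# hyphenated JSON keys used by the Stitch Connect API.
#
#     meta = StreamLevelMetadata(database_name="analytics", selected=True)
#     meta.database_name                                    # -> "analytics"
#     StreamLevelMetadata.attribute_map["database_name"]    # -> "database-name"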
def __init__(
self,
database_name=None,
forced_replication_method=None,
is_view=None,
replication_key=None,
replication_method=None,
row_count=None,
schema_name=None,
selected=None,
table_key_properties=None,
valid_replication_keys=None,
view_key_properties=None,
tap_google_analytics_all_cubes=None,
local_vars_configuration=None,
): # noqa: E501
"""StreamLevelMetadata - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._database_name = None
self._forced_replication_method = None
self._is_view = None
self._replication_key = None
self._replication_method = None
self._row_count = None
self._schema_name = None
self._selected = None
self._table_key_properties = None
self._valid_replication_keys = None
self._view_key_properties = None
self._tap_google_analytics_all_cubes = None
self.discriminator = None
if database_name is not None:
self.database_name = database_name
if forced_replication_method is not None:
self.forced_replication_method = forced_replication_method
if is_view is not None:
self.is_view = is_view
if replication_key is not None:
self.replication_key = replication_key
if replication_method is not None:
self.replication_method = replication_method
if row_count is not None:
self.row_count = row_count
if schema_name is not None:
self.schema_name = schema_name
if selected is not None:
self.selected = selected
if table_key_properties is not None:
self.table_key_properties = table_key_properties
if valid_replication_keys is not None:
self.valid_replication_keys = valid_replication_keys
if view_key_properties is not None:
self.view_key_properties = view_key_properties
if tap_google_analytics_all_cubes is not None:
self.tap_google_analytics_all_cubes = tap_google_analytics_all_cubes
@property
def database_name(self):
"""Gets the database_name of this StreamLevelMetadata. # noqa: E501
For database sources only. The name of the database containing the stream. # noqa: E501
:return: The database_name of this StreamLevelMetadata. # noqa: E501
:rtype: str
"""
return self._database_name
@database_name.setter
def database_name(self, database_name):
"""Sets the database_name of this StreamLevelMetadata.
For database sources only. The name of the database containing the stream. # noqa: E501
:param database_name: The database_name of this StreamLevelMetadata. # noqa: E501
:type: str
"""
self._database_name = database_name
@property
def forced_replication_method(self):
"""Gets the forced_replication_method of this StreamLevelMetadata. # noqa: E501
:return: The forced_replication_method of this StreamLevelMetadata. # noqa: E501
:rtype: ForcedReplicationMethod
"""
return self._forced_replication_method
@forced_replication_method.setter
def forced_replication_method(self, forced_replication_method):
"""Sets the forced_replication_method of this StreamLevelMetadata.
:param forced_replication_method: The forced_replication_method of this StreamLevelMetadata. # noqa: E501
:type: ForcedReplicationMethod
"""
self._forced_replication_method = forced_replication_method
@property
def is_view(self):
"""Gets the is_view of this StreamLevelMetadata. # noqa: E501
For database sources only. Indicates if the stream is a database view. # noqa: E501
:return: The is_view of this StreamLevelMetadata. # noqa: E501
:rtype: bool
"""
return self._is_view
@is_view.setter
def is_view(self, is_view):
"""Sets the is_view of this StreamLevelMetadata.
For database sources only. Indicates if the stream is a database view. # noqa: E501
:param is_view: The is_view of this StreamLevelMetadata. # noqa: E501
:type: bool
"""
self._is_view = is_view
@property
def replication_key(self):
"""Gets the replication_key of this StreamLevelMetadata. # noqa: E501
Indicates the field being used as the stream's Replication Key. # noqa: E501
:return: The replication_key of this StreamLevelMetadata. # noqa: E501
:rtype: str
"""
return self._replication_key
@replication_key.setter
def replication_key(self, replication_key):
"""Sets the replication_key of this StreamLevelMetadata.
Indicates the field being used as the stream's Replication Key. # noqa: E501
:param replication_key: The replication_key of this StreamLevelMetadata. # noqa: E501
:type: str
"""
self._replication_key = replication_key
@property
def replication_method(self):
"""Gets the replication_method of this StreamLevelMetadata. # noqa: E501
The Replication Method the stream uses to replicate data. Accepted values are: FULL_TABLE - The stream is using Full Table Replication; INCREMENTAL - The stream is using Key-based Incremental Replication; LOG_BASED - The stream is using Log-based Incremental Replication. Note: This method is only available for certain database sources, and requires additional setup to use. # noqa: E501
:return: The replication_method of this StreamLevelMetadata. # noqa: E501
:rtype: str
"""
return self._replication_method
@replication_method.setter
def replication_method(self, replication_method):
"""Sets the replication_method of this StreamLevelMetadata.
The Replication Method the stream uses to replicate data. Accepted values are: FULL_TABLE - The stream is using Full Table Replication; INCREMENTAL - The stream is using Key-based Incremental Replication; LOG_BASED - The stream is using Log-based Incremental Replication. Note: This method is only available for certain database sources, and requires additional setup to use. # noqa: E501
:param replication_method: The replication_method of this StreamLevelMetadata. # noqa: E501
:type: str
"""
allowed_values = ["FULL_TABLE", "INCREMENTAL", "LOG_BASED"] # noqa: E501
if (
self.local_vars_configuration.client_side_validation
and replication_method not in allowed_values
): # noqa: E501
raise ValueError(
"Invalid value for `replication_method` ({0}), must be one of {1}".format( # noqa: E501
replication_method, allowed_values
)
)
self._replication_method = replication_method
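# Validation sketch (assumes client_side_validation is enabled, which is the default
# Configuration in generated clients):
#
#     meta = StreamLevelMetadata()
#     meta.replication_method = "LOG_BASED"   # accepted
#     meta.replication_method = "FULL"        # raises ValueError (not in allowed_values)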
@property
def row_count(self):
"""Gets the row_count of this StreamLevelMetadata. # noqa: E501
For database sources only. The number of rows (records) in the stream. # noqa: E501
:return: The row_count of this StreamLevelMetadata. # noqa: E501
:rtype: int
"""
return self._row_count
@row_count.setter
def row_count(self, row_count):
"""Sets the row_count of this StreamLevelMetadata.
For database sources only. The number of rows (records) in the stream. # noqa: E501
:param row_count: The row_count of this StreamLevelMetadata. # noqa: E501
:type: int
"""
self._row_count = row_count
@property
def schema_name(self):
"""Gets the schema_name of this StreamLevelMetadata. # noqa: E501
For database sources only. The name of the schema containing the stream. # noqa: E501
:return: The schema_name of this StreamLevelMetadata. # noqa: E501
:rtype: str
"""
return self._schema_name
@schema_name.setter
def schema_name(self, schema_name):
"""Sets the schema_name of this StreamLevelMetadata.
For database sources only. The name of the schema containing the stream. # noqa: E501
:param schema_name: The schema_name of this StreamLevelMetadata. # noqa: E501
:type: str
"""
self._schema_name = schema_name
@property
def selected(self):
"""Gets the selected of this StreamLevelMetadata. # noqa: E501
Indicates whether a stream should be set to replicate. Accepted values are: true - The stream is selected and data for selected fields will be replicated; false - The stream is not selected and no data will be replicated. # noqa: E501
:return: The selected of this StreamLevelMetadata. # noqa: E501
:rtype: bool
"""
return self._selected
@selected.setter
def selected(self, selected):
"""Sets the selected of this StreamLevelMetadata.
Indicates whether a stream should be set to replicate. Accepted values are: true - The stream is selected and data for selected fields will be replicated; false - The stream is not selected and no data will be replicated. # noqa: E501
:param selected: The selected of this StreamLevelMetadata. # noqa: E501
:type: bool
"""
self._selected = selected
@property
def table_key_properties(self):
"""Gets the table_key_properties of this StreamLevelMetadata. # noqa: E501
An array of strings listing the fields that make up the key properties of the table. These are the table's defined Primary Keys. # noqa: E501
:return: The table_key_properties of this StreamLevelMetadata. # noqa: E501
:rtype: list[str]
"""
return self._table_key_properties
@table_key_properties.setter
def table_key_properties(self, table_key_properties):
"""Sets the table_key_properties of this StreamLevelMetadata.
An array of strings listing the fields that make up the key properties of the table. These are the table's defined Primary Keys. # noqa: E501
:param table_key_properties: The table_key_properties of this StreamLevelMetadata. # noqa: E501
:type: list[str]
"""
self._table_key_properties = table_key_properties
@property
def valid_replication_keys(self):
"""Gets the valid_replication_keys of this StreamLevelMetadata. # noqa: E501
An array of strings indicating the fields valid for use as Replication Keys in Key-based Incremental Replication (replication-method: INCREMENTAL). Note: For SaaS sources, the fields listed in this array are pre-defined by Stitch and will be used as the Replication Keys for the stream. They cannot be modified. # noqa: E501
:return: The valid_replication_keys of this StreamLevelMetadata. # noqa: E501
:rtype: list[str]
"""
return self._valid_replication_keys
if center_pt is None:
center_pt = (663, 492)
self.center = center_pt
self.scale = np.sqrt(8)
self.true_center_x, self.true_center_y = self.position_xformer(self.center)
# print(self.position_xformer(self.center))
self.fish_angle = 0
self.center_x = self.true_center_x.copy()
self.center_y = self.true_center_y.copy()
self.bin_center_x = -1 * self.center_x * self.scale
self.bin_center_y = -1 * self.center_y * self.scale
# Set up profiling
self.profile_on = profile_on
if self.profile_on:
PStatClient.connect() # this will only work if pstats is running
ShowBaseGlobal.base.setFrameRateMeter(True) # Show frame rate
self.accept('next_stimulus', self.advance_stimulus)
self.accept('stat_stim', self.unset_stationary)
self.accept('begin_exp', self.begin_move)
self.accept('centering', self.centering_stimulus)
self.accept('adjust_center', self.adjust_center, [])
self.accept('center_position', self.center_pos_changes, [])
self.accept('live_thetas', self.change_theta, [])
self.accept('end_experiment', self.exp_end)
self.set_stimulus(self.current_stim)
self.automated = automated
if self.automated:
self.begin_move()
def exp_end(self):
# This is the end of the experiment
try:
with smtplib.SMTP_SSL('smtp.gmail.com', self.email_port, context=self.email_context) as server:
server.login(self.send_email, self.return_pass())
server.sendmail(self.send_email, self.receive_email, 'experiment finished rig 1')
except Exception:
print('email sending failed')
print('Exp Finished!')
self.filestream.close()
final_saving(self.save_path)
# gw.getWindowsWithTitle(self.window_name)[0].close()
sys.exit()
def begin_move(self):
self.taskMgr.add(self.move_textures, "move textures")
def curr_params(self, curr_index):
try:
params = self.stimuli.loc[curr_index].copy()
except KeyError:
# This is the end of the experiment
try:
with smtplib.SMTP_SSL('smtp.gmail.com', self.email_port, context=self.email_context) as server:
server.login(self.send_email, self.return_pass())
server.sendmail(self.send_email, self.receive_email, 'experiment finished rig 1')
except Exception:
print('email sending failed')
print('not enough stimuli')
final_saving(self.save_path)
# gw.getWindowsWithTitle(self.window_name)[0].close()
sys.exit()
return params
def unset_stationary(self):
self.current_stim['velocity'] = self.curr_params(self.curr_id)['velocity']
if self.filestream and self.current_stim['stat_time'] != 0:
saved_stim = dict(self.curr_params(self.curr_id).copy())
saved_stim.pop('texture')
self.filestream.write("\n")
self.filestream.write(f"{str(datetime.now())}: {self.curr_id} {saved_stim}")
self.filestream.flush()
def set_title(self, title):
self.window_properties.setTitle(title)
ShowBaseGlobal.base.win.requestProperties(self.window_properties)
def set_stimulus(self, stim):
if not self.stimulus_initialized:
self.stimulus_initialized = True
elif self.current_stim['stim_type'] == 'centering':
self.clear_cards()
self.current_stim = stim.copy()
if self.current_stim['stat_time'] != 0:
if self.current_stim['stim_type'] == 'b':
self.current_stim['velocity'] = (0, 0)
else:
self.current_stim['velocity'] = 0
# self.center_x = self.true_center_x
# self.center_y = self.true_center_y
# print(self.current_stim['angle'])
if self.current_stim['stim_type'] != 'centering':
print('showing:', self.current_stim['stim_name'])
self.create_texture_stages()
self.create_cards()
self.set_texture_stages()
self.set_transforms()
if self.filestream:
saved_stim = dict(self.current_stim.copy())
saved_stim.pop('texture')
self.filestream.write("\n")
self.filestream.write(f"{str(datetime.now())}: {self.curr_id} {saved_stim}")
self.filestream.flush()
def center_pos_changes(self, data):
self.true_center_x, self.true_center_y = self.position_xformer(data)
print('center is:', self.true_center_x, self.true_center_y)
self.center_x = self.true_center_x.copy()
self.center_y = self.true_center_y.copy()
self.bin_center_x = -1 * self.center_x * self.scale
self.bin_center_y = -1 * self.center_y * self.scale
self.set_transforms()
def change_theta(self, data):
# print(data)
# data = data * 180/np.pi
# self.strip_angle = self.reduce_to_pi(data + self.rotation_offset)
# self.fish_angle = self.reduce_to_pi(data)
# print(data)
self.strip_angle = data + self.rotation_offset
self.fish_angle = data
self.set_transforms()
# print('changed theta to', self.strip_angle)
def adjust_center(self, data):
self.center_x, self.center_y = self.position_xformer(data)
self.bin_center_x = -1 * self.center_x * self.scale
self.bin_center_y = -1 * self.center_y * self.scale
# print('adjusted xy', data, self.center_x, self.center_y, self.bin_center_x, self.bin_center_y)
self.set_transforms()
def position_xformer(self, raw_pos, xy_flipped=False):
if xy_flipped:
_x = 1
_y = 0
else:
_x = 0
_y = 1
pos = (raw_pos[_x], raw_pos[_y])
conv_pt = cv2.transform(np.reshape(pos, (1, 1, 2)), self.calibrator)[0][0]
x = -1*((conv_pt[0]/self.window_size[0]) - 0.5)
y = -1*((conv_pt[1]/self.window_size[1]) - 0.5)
return x, y
def centering_stimulus(self):
# print('centering in pandas')
self._centering = True
if self.radial_centering:
self.curr_txt = self.centering_stack[self.centering_index]
self.clear_cards()
self.current_stim = {'stim_type' : 'centering', 'angle': 0, 'velocity':0, 'texture': self.curr_txt, 'stat_time':0}
self.set_stimulus(self.current_stim)
else:
self.clear_cards()
self.current_stim = {'stim_type' : 's', 'velocity' : 0, 'angle' : 0, 'texture': self.centering_stim, 'stat_time':0}
self.set_stimulus(self.current_stim)
# print(self.center_x, self.center_y)
if self.filestream:
saved_stim = self.current_stim.copy()
saved_stim.pop('texture')
self.filestream.write("\n")
self.filestream.write(f"{str(datetime.now())}: {self.curr_id} {saved_stim}")
self.filestream.flush()
def move_textures(self, task):
# moving the stimuli
# print(self.current_stim)
if self.current_stim['stim_type'] == 'b':
left_tex_position = -task.time * self.current_stim['velocity'][0] # negative b/c texture stage
right_tex_position = -task.time * self.current_stim['velocity'][1]
try:
self.left_card.setTexPos(self.left_texture_stage, left_tex_position, 0, 0)
self.right_card.setTexPos(self.right_texture_stage, right_tex_position, 0, 0)
except Exception as e:
print('error on move_texture_b')
elif self.current_stim['stim_type'] == 's':
if self.current_stim['velocity'] == 0:
pass
else:
new_position = -task.time*self.current_stim['velocity']
# Sometimes setting position fails when the texture stage isn't fully set
try:
self.card.setTexPos(self.texture_stage, new_position, 0, 0) #u, v, w
except Exception as e:
print('error on move_texture_s')
elif self.current_stim['stim_type'] == 'rdk' and self.dots_made:
dt = task.time - self.last_time
self.last_time = task.time
# because this isn't the 2D card, let's set up a lens to see it
self.lens = PerspectiveLens()
self.lens.setFov(90, 90)
self.lens.setNearFar(0.001, 1000)
self.lens.setAspectRatio(1)
self.cam.node().setLens(self.lens)
# decide which dots move coherently this frame: dots whose random draw falls below the coherence percentage
random_vector = np.random.randint(100, size=10000)
self.coherent_change_vector_ind = np.where(random_vector < self.current_stim['coherence'])
#######
# Continuously update the dot stimulus
#####
self.dots_position[0, :, 0][self.coherent_change_vector_ind] += \
np.cos(self.current_stim['angle'] * np.pi / 180) * self.current_stim['velocity'] * dt
self.dots_position[0, :, 1][self.coherent_change_vector_ind] += \
np.sin(self.current_stim['angle'] * np.pi / 180) * self.current_stim['velocity'] * dt
# Randomly redraw dot with a short lifetime
k = np.random.random(10000)
if self.current_stim['lifetime'] == 0:
ind = np.where(k >= 0)[0]
else:
ind = np.where(k < dt / self.current_stim['lifetime'])[0]
self.dots_position[0, :, 0][ind] = 2 * np.random.random(len(ind)).astype(np.float32) - 1 # x
self.dots_position[0, :, 1][ind] = 2 * np.random.random(len(ind)).astype(np.float32) - 1 # y
self.dots_position[0, :, 2] = np.ones(10000) * self.current_stim['brightness']
# Wrap them
self.dots_position[0, :, 0] = (self.dots_position[0, :, 0] + 1) % 2 - 1
self.dots_position[0, :, 1] = (self.dots_position[0, :, 1] + 1) % 2 - 1
memoryview(self.dummytex.modify_ram_image())[:] = self.dots_position.tobytes()
elif self.current_stim['stim_type'] == 'centering' and self.radial_centering:
# this value is modifiable to change speed of radial sine
if task.time > 1.75:
self.clear_cards()
#print('showing centering index', self.centering_index)
self.current_stim['texture'] = self.centering_stack[self.centering_index]
# self.centering_stack[self.centering_index].view()
self.set_stimulus(self.current_stim)
self.centering_index += 1
if self.centering_index == self.centering_stack_size:
self.centering_index = 0
return task.cont
def advance_stimulus(self):
self._centering = False
try:
self.curr_id += 1
self.clear_cards()
self.current_stim = self.curr_params(self.curr_id)
self.set_stimulus(self.current_stim)
except IndexError:
self.filestream.close()
final_saving(self.save_path)
sys.exit()
def create_texture_stages(self):
"""
Create the texture stages: these are basically textures that you can apply
to cards (sometimes multiple textures at the same time -- this is useful with
masks).
For more on texture stages:
https://docs.panda3d.org/1.10/python/programming/texturing/multitexture-introduction
"""
# Binocular cards
if self.current_stim['stim_type'] == 'b':
# TEXTURE STAGES FOR LEFT CARD
# Texture itself
self.left_texture_stage = TextureStage('left_texture_stage')
# Mask
self.left_mask = Texture("left_mask_texture")
self.left_mask.setup2dTexture(self.current_stim['texture'].texture_size[0],
self.current_stim['texture'].texture_size[1],
Texture.T_unsigned_byte, Texture.F_luminance)
self.left_mask_stage = TextureStage('left_mask_array')
# TEXTURE STAGES FOR RIGHT CARD
self.right_texture_stage = TextureStage('right_texture_stage')
# Mask
self.right_mask = Texture("right_mask_texture")
self.right_mask.setup2dTexture(self.current_stim['texture'].texture_size[0],
self.current_stim['texture'].texture_size[1],
Texture.T_unsigned_byte, Texture.F_luminance)
self.right_mask_stage = TextureStage('right_mask_stage')
# monocular cards
elif self.current_stim['stim_type'] == 's':
self.texture_stage = TextureStage("texture_stage")
# random dot stimuli are special: they are full panda3d models rendered through a custom lens so they appear 2D,
# NOT the 2D card-based textures the other stimuli are based on
elif self.current_stim['stim_type'] == 'rdk':
self.dot_motion_coherence_shader = [
""" #version 140
uniform sampler2D p3d_Texture0;
uniform mat4 p3d_ModelViewProjectionMatrix;
in vec4 p3d_Vertex;
in vec2 p3d_MultiTexCoord0;
uniform int number_of_dots;
uniform float size_of_dots;
uniform float radius;
out float dot_color;
void main(void) {
vec4 newvertex;
float dot_i;
float dot_x, dot_y;
float maxi = 10000.0;
vec4 dot_properties;
dot_i = float(p3d_Vertex[1]);
dot_properties = texture2D(p3d_Texture0, vec2(dot_i/maxi, 0.0));
dot_x = dot_properties[2];
dot_y = dot_properties[1];
dot_color = dot_properties[0];
newvertex = p3d_Vertex;
if (dot_x*dot_x + dot_y*dot_y > radius*radius || dot_i > number_of_dots) { // only plot a certain number of dots in a circle
newvertex[0] = 0.0;
newvertex[1] = 0.0;
newvertex[2] = 0.0;
} else {
newvertex[0] = p3d_Vertex[0]*size_of_dots+dot_x;
newvertex[1] = 0.75;
newvertex[2] = p3d_Vertex[2]*size_of_dots+dot_y;
}
gl_Position = p3d_ModelViewProjectionMatrix * newvertex;
}
""",
""" #version 140
in float dot_color;
//out vec4 gl_FragColor;
void main() {
gl_FragColor = vec4(dot_color, dot_color, dot_color, 1);
}
"""
]
self.compiled_dot_motion_shader = Shader.make(Shader.SLGLSL, self.dot_motion_coherence_shader[0],
self.dot_motion_coherence_shader[1])
self.circles = self.loader.loadModel('circles.bam')
self.dummytex = Texture("dummy texture") # this doesn't have an associated texture (as above)
self.dummytex.setup2dTexture(10000, 1, Texture.T_float, Texture.FRgb32)
self.dummytex.setMagfilter(Texture.FTNearest)
tex = TextureStage("dummy followup")
tex.setSort(-100)  # make this stage sort ahead of any other texture stages
self.circles.setTexture(tex, self.dummytex)
self.circles.setShader(self.compiled_dot_motion_shader)
elif self.current_stim['stim_type'] == 'centering':
self.texture_stage = TextureStage('texture_stage')
def return_pass(self):
import pandastim.experiments.matt as matt
return matt.password
def create_cards(self):
"""
Create cards: these are panda3d objects that are required for displaying textures.
You can't just have a disembodied texture. In pandastim (at least for now) we are
only showing 2d projections of textures, so we use cards.
"""
cardmaker = CardMaker("stimcard")
cardmaker.setFrameFullscreenQuad()
# Binocular cards
if self.current_stim['stim_type'] == 'b':
self.setBackgroundColor((0, 0, 0, 1)) # without this the cards will appear washed out
self.left_card = self.aspect2d.attachNewNode(cardmaker.generate())
self.left_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add)) # otherwise only right card shows
self.right_card = self.aspect2d.attachNewNode(cardmaker.generate())
self.right_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add))
# Tex card
elif self.current_stim['stim_type'] == 's':
self.card = self.aspect2d.attachNewNode(cardmaker.generate())
self.card.setColor((1, 1, 1, 1))
self.card.setScale(self.scale)
elif self.current_stim['stim_type'] == 'centering':
self.card = self.aspect2d.attachNewNode(cardmaker.generate())
self.card.setColor((1, 1, 1, 1)) # ?
# self.setBackgroundColor((0, 0, 0, 1))
self.card.setScale(self.scale)
self.center_card_created = True
# attach model to card w/ the rdk stimulus
elif self.current_stim['stim_type'] == 'rdk':
self.card = self.render.attachNewNode('dumb node')
self.circles.reparentTo(self.card)
self.circles.setShaderInput("number_of_dots", int(self.current_stim['number']))
self.circles.setShaderInput("size_of_dots", self.current_stim['size'])
self.circles.setShaderInput("radius", self.current_stim['window'])
self.setBackgroundColor(0, 0, 0, 1)
def set_texture_stages(self):
"""
Add texture stages to cards
"""
if self.current_stim['stim_type'] == 'b':
# self.mask_position_uv = (self.bin_center_x, self.bin_center_y)
# CREATE MASK ARRAYS
self.left_mask_array
import torch
import torch.nn as nn
import torch.nn.functional as F
from .position_encoding import *
from typing import Optional
from torch import Tensor
import copy
def aligned_bilinear(tensor, factor):
assert tensor.dim() == 4
assert factor >= 1
assert int(factor) == factor
if factor == 1:
return tensor
h, w = tensor.size()[2:]
tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode="replicate")
oh = factor * h + 1
ow = factor * w + 1
tensor = F.interpolate(
tensor, size=(oh, ow),
mode='bilinear',
align_corners=True
)
tensor = F.pad(
tensor, pad=(factor // 2, 0, factor // 2, 0),
mode="replicate"
)
return tensor[:, :, :oh - 1, :ow - 1]
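# Shape sketch for aligned_bilinear (illustrative only): with factor f, a feature map of
# spatial size (h, w) is padded, interpolated to (f*h + 1, f*w + 1), padded by f // 2 on
# the top/left, and cropped to exactly (f*h, f*w), keeping pixel centers aligned.
#
#     x = torch.randn(1, 128, 32, 32)
#     y = aligned_bilinear(x, 4)   # y.shape == (1, 128, 128, 128)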
class Decoder(nn.Module):
def __init__(
self,
norm_layer=nn.BatchNorm2d
):
super(Decoder, self).__init__()
fpn_dims = [256, 512, 1024, 2048]
dec_dim = 128
self.refine_spa = nn.ModuleList()
for dim in fpn_dims:
self.refine_spa.append(nn.Sequential(
nn.Conv2d(dim, dec_dim, kernel_size=3, padding=1, bias=False),
norm_layer(dec_dim),
nn.ReLU(inplace=True)
))
self.refine_tem = nn.ModuleList()
for dim in fpn_dims:
self.refine_tem.append(nn.Sequential(
nn.Conv2d(dim, dec_dim, kernel_size=3, padding=1, bias=False),
norm_layer(dec_dim),
nn.ReLU(inplace=True)
))
num_conv_tower = 4
spa_tower = []
for _ in range(num_conv_tower):
spa_tower.append(nn.Sequential(
nn.Conv2d(dec_dim, dec_dim, kernel_size=3, padding=1, bias=False),
norm_layer(dec_dim),
nn.ReLU(inplace=True)
))
spa_tower.append(nn.Conv2d(dec_dim, 1, kernel_size=1))
self.add_module('spa_tower', nn.Sequential(*spa_tower))
tem_tower = []
for _ in range(num_conv_tower):
tem_tower.append(nn.Sequential(
nn.Conv2d(dec_dim, dec_dim, kernel_size=3, padding=1, bias=False),
norm_layer(dec_dim),
nn.ReLU(inplace=True)
))
tem_tower.append(nn.Conv2d(dec_dim, 1, kernel_size=1))
self.add_module('tem_tower', nn.Sequential(*tem_tower))
self.fusion = Fusion(dec_dim, dec_dim, norm_layer)
fuse_tower = []
for _ in range(num_conv_tower):
fuse_tower.append(nn.Sequential(
nn.Conv2d(dec_dim, dec_dim, kernel_size=3, padding=1, bias=False),
norm_layer(dec_dim),
nn.ReLU(inplace=True)
))
fuse_tower.append(nn.Conv2d(dec_dim, 1, kernel_size=1))
self.add_module('fuse_tower', nn.Sequential(*fuse_tower))
self.mask_head = nn.Conv2d(3, 1, kernel_size=3, padding=1)
def forward(self, spa_feats, tem_feats, sent):
for i in range(len(spa_feats)):
if i == 0:
spa = self.refine_spa[i](spa_feats[i])
tem = self.refine_tem[i](tem_feats[i])
else:
spa_p = self.refine_spa[i](spa_feats[i])
tem_p = self.refine_tem[i](tem_feats[i])
target_h, target_w = spa.size()[2:]
h, w = spa_p.size()[2:]
assert target_h % h == 0
assert target_w % w == 0
factor_h, factor_w = target_h // h, target_w // w
assert factor_h == factor_w
spa_p = aligned_bilinear(spa_p, factor_h)
tem_p = aligned_bilinear(tem_p, factor_h)
spa = spa + spa_p
tem = tem + tem_p
z = self.fusion(spa, tem, sent)
pred = self.fuse_tower(z)
pred = aligned_bilinear(pred, 4).squeeze()
return pred
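# Shape sketch for Decoder.forward (assumed dimensions): spa_feats / tem_feats are lists of
# backbone feature maps with 256/512/1024/2048 channels ordered from high to low resolution.
# Each level is projected to 128 channels, upsampled to the first level's resolution and
# summed; the fused spatial/temporal/text features are decoded to a 1-channel mask that is
# upsampled by a further factor of 4.
#
#     pred = decoder(spa_feats, tem_feats, sent)   # hypothetical inputs; mask logits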
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
dim_feedforward=512,
dropout=0.1,
activation="relu",
normalize_before=True,
):
super().__init__()
self.self_attn_words = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.cross_attn_img = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.cross_attn_flo = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm_words_1 = nn.LayerNorm(d_model)
self.norm_words_2 = nn.LayerNorm(d_model)
self.norm_flo = nn.LayerNorm(d_model)
self.norm_flo2 = nn.LayerNorm(d_model)
self.dropout_words = nn.Dropout(dropout)
self.dropout_cross_img = nn.Dropout(dropout)
self.dropout_cross_flo = nn.Dropout(dropout)
self.dropout_flo2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(self, img, flo, words, pos_embed_img, pos_embed_txt, key_padding_mask_img, key_padding_mask_txt, vis=False):
# img: [H*W, B, C]
# flo: [H*W, B, C]
# words: [N, B, C]
# pos_embed_img: [H*W, B, C]
# pos_embed_txt: [N, B, C]
# key_padding_mask_img: [B, H*W]
# key_padding_mask_txt: [B, N]
query_words = key_words = self.with_pos_embed(words, pos_embed_txt)
vis_map = {}
if not vis:
words_attn, _ = self.self_attn_words(query_words, key_words, value=words, key_padding_mask=key_padding_mask_txt)
else:
words_attn, tmp = self.self_attn_words(query_words, key_words, value=words, key_padding_mask=key_padding_mask_txt)
vis_map['self_words_attn'] = tmp
words = words + self.dropout_words(words_attn)
words = self.norm_words_1(words)
key_img = value_img = self.with_pos_embed(img, pos_embed_img)
words_cross_attn, _ = self.cross_attn_img(words, key_img, value=value_img,
key_padding_mask=key_padding_mask_img)
words = words + self.dropout_cross_img(words_cross_attn)
words = self.norm_words_2(words)
query_flo = self.with_pos_embed(flo, pos_embed_img)
if not vis:
flo_cross_attn, _ = self.cross_attn_flo(query_flo, words, value=words, key_padding_mask=key_padding_mask_txt)
else:
flo_cross_attn, tmp = self.cross_attn_flo(query_flo, words, value=words, key_padding_mask=key_padding_mask_txt)
vis_map['cross_words_attn'] = tmp
flo = flo + self.dropout_cross_flo(flo_cross_attn)
flo = self.norm_flo(flo)
flo2 = self.linear2(self.dropout(self.activation(self.linear1(flo))))
flo2 = flo + self.dropout_flo2(flo2)
flo2 = self.norm_flo2(flo2)
if not vis:
return flo2
else:
return flo2, vis_map
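# Shape sketch for TransformerEncoderLayer.forward (assumed sizes, matching the comments
# above): with d_model=512, img/flo are [H*W, B, 512], words is [N, B, 512], and the
# padding masks are [B, H*W] / [B, N]; the layer returns the updated flow features as a
# [H*W, B, 512] tensor (plus a dict of attention maps when vis=True).
#
#     layer = TransformerEncoderLayer()
#     out = layer(img, flo, words, pos_img, pos_txt, mask_img, mask_txt)   # hypothetical tensors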
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class TransformerEncoder(nn.Module):
def __init__(self, num_layers):
super().__init__()
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(TransformerEncoderLayer())
self.num_layers = num_layers
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(self, img, flo, words, pos_embed_img, pos_embed_txt, key_padding_mask_img, key_padding_mask_txt, vis=False):
# img: [H*W, B, C]
# flo: [H*W, B, C]
# words: [N, B, C]
# pos_embed_img: [H*W, B, C]
# pos_embed_txt: [N, B, C]
# key_padding_mask_img: [B, H*W]
# key_padding_mask_txt: [B, N]
vis_map = {}
for layer in self.layers:
src_flo = flo
src_img = img
if not vis:
flo = layer(src_img, src_flo, words, pos_embed_img, pos_embed_txt, key_padding_mask_img,
key_padding_mask_txt)
img = layer(src_flo, src_img, words, pos_embed_img, pos_embed_txt, key_padding_mask_img,
key_padding_mask_txt)
else:
flo, vis_map_flo = layer(src_img, src_flo, words, pos_embed_img, pos_embed_txt, key_padding_mask_img,
key_padding_mask_txt, vis)
img, vis_map_img = layer(src_flo, src_img, words, pos_embed_img, pos_embed_txt, key_padding_mask_img,
key_padding_mask_txt, vis)
if not vis:
return img, flo
else:
vis_map['flo'] = vis_map_flo
vis_map['img'] = vis_map_img
return img, flo, vis_map
class Fusion(nn.Module):
def __init__(
self,
input_dim,
feature_dim,
norm_layer,
):
super(Fusion, self).__init__()
self.img_flo_fc = nn.Sequential(
nn.Linear(input_dim * 2, feature_dim),
nn.ReLU(inplace=True)
)
self.img_txt_fc = nn.Linear(feature_dim + 512, input_dim)
self.flo_txt_fc = nn.Linear(feature_dim + 512, input_dim)
self.img_enhance_fc = nn.Linear(feature_dim, feature_dim)
self.flo_enhance_fc = nn.Linear(feature_dim, feature_dim)
self.fusion_cat_conv = nn.Sequential(
nn.Conv2d(input_dim * 2, input_dim, kernel_size=3, padding=1, bias=False),
norm_layer(input_dim),
nn.ReLU(inplace=True),
)
def forward(
self,
image,
flow,
txt
):
img_avg = image.flatten(2).mean(dim=2)
flo_avg = flow.flatten(2).mean(dim=2)
# [B, C]
img_avg = img_avg.unsqueeze(1)
flo_avg = flo_avg.unsqueeze(1)
# [B, 1, C]
img_flo = torch.cat([img_avg, flo_avg], dim=2)
# [B, 1, 2C]
img_flo = F.relu(self.img_flo_fc(img_flo))
# [B, 1, c]
img_txt = torch.cat([img_avg, txt], dim=2)
# [B, 1, c+512]
img_txt_gate = torch.sigmoid(self.img_txt_fc(img_txt))
flo_txt = torch.cat([flo_avg, txt], dim=2)
flo_txt_gate = torch.sigmoid(self.flo_txt_fc(flo_txt))
img_txt_gate = img_txt_gate.squeeze(1).unsqueeze(2).unsqueeze(3)
flo_txt_gate = flo_txt_gate.squeeze(1).unsqueeze(2).unsqueeze(3)
image = image * img_txt_gate
flow = flow * flo_txt_gate
#
img_enhance = torch.sigmoid(self.img_enhance_fc(img_flo))
flo_enhance = torch.sigmoid(self.flo_enhance_fc(img_flo))
img_enhance = img_enhance.squeeze(1).unsqueeze(2).unsqueeze(3)
flo_enhance = flo_enhance.squeeze(1).unsqueeze(2).unsqueeze(3)
# [B, C, 1, 1]
image = image * img_enhance
flow = flow * flo_enhance
# image = image * img_txt_gate.squeeze(1).unsqueeze(2).unsqueeze(3)
# flow = flow * flo_txt_gate.squeeze(1).unsqueeze(2).unsqueeze(3)
fusion_cat = torch.cat([image, flow], dim=1)
fusion_cat = self.fusion_cat_conv(fusion_cat)
return fusion_cat
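# Shape sketch for Fusion (assumed sizes): image and flow are [B, C, H, W] feature maps and
# txt is a [B, 1, 512] sentence embedding. Each modality is gated by a text-conditioned
# sigmoid and a cross-modal sigmoid, then the two are concatenated and reduced back to
# [B, C, H, W] by fusion_cat_conv.
#
#     fusion = Fusion(input_dim=128, feature_dim=128, norm_layer=nn.BatchNorm2d)
#     out = fusion(image, flow, txt)   # hypothetical tensors; out is [B, 128, H, W]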
class JointModel(nn.Module):
def __init__(
self,
image_encoder=None,
flow_encoder=None,
num_layers=1,
norm_layer=nn.BatchNorm2d,
):
super(JointModel, self).__init__()
resnet_im = image_encoder
self.conv1_1 = resnet_im.conv1
self.bn1_1 = resnet_im.bn1
self.relu_1 = resnet_im.relu
self.maxpool_1 = resnet_im.maxpool
self.res2_1 = resnet_im.layer1
self.res3_1 = resnet_im.layer2
self.res4_1 = resnet_im.layer3
self.res5_1 = resnet_im.layer4
resnet_fl = flow_encoder
self.conv1_2 = resnet_fl.conv1
self.bn1_2 = resnet_fl.bn1
self.relu_2 = resnet_fl.relu
self.maxpool_2 = resnet_fl.maxpool
self.res2_2 = resnet_fl.layer1
self.res3_2 = resnet_fl.layer2
self.res4_2 = resnet_fl.layer3
self.res5_2 = resnet_fl.layer4
self.text_encoder = TextEncoder(num_layers=num_layers)
self.decoder = Decoder()
self.conv_r5_1_reduce = nn.Sequential(
nn.Conv2d(2048, 512, kernel_size=1, bias=False),
norm_layer(512),
)
self.conv_r5_2_reduce = nn.Sequential(
nn.Conv2d(2048, 512, kernel_size=1, bias=False),
norm_layer(512),
)
self.transformer = nn.ModuleDict()
self.transformer['stage4'] = TransformerEncoder(4)
self.transformer['stage5'] = TransformerEncoder(4)
self.conv_r4_1_reduce = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=1, bias=False),
norm_layer(512),
)
self.conv_r4_2_reduce = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=1, bias=False),
norm_layer(512),
)
self.conv_r4_1_up = nn.Sequential(
nn.Conv2d(512, 1024, kernel_size=1, bias=False),
norm_layer(1024),
)
self.conv_r4_2_up = nn.Sequential(
nn.Conv2d(512, 1024, kernel_size=1, bias=False),
norm_layer(1024),
)
self.conv_r5_1_up = nn.Sequential(
nn.Conv2d(512, 2048, kernel_size=1, bias=False),
norm_layer(2048),
)
self.conv_r5_2_up = nn.Sequential(
nn.Conv2d(512, 2048, kernel_size=1, bias=False),
norm_layer(2048),
)
def _forward_transformer(self, img, flo, words, phrase_mask, img_mask, stage, vis=False):
B, C, H, W = img.shape
pos_embed_img = positionalencoding2d(B, d_model=C, height=H, width=W)
pos_embed_img = pos_embed_img.flatten(2).permute(2, 0, 1).contiguous()
# [H*W, B, 512]
pos_embed_txt = positionalencoding1d(B, max_len=phrase_mask.shape[-1])
pos_embed_txt = pos_embed_txt.permute(1, 0, 2).contiguous()
# [N, B, 512]
key_padding_mask_img = ~img_mask.bool()
# [B, H*W]
key_padding_mask_txt = ~phrase_mask.bool()
# [B, N]
f_img = img.flatten(2).permute(2, 0, 1).contiguous()
f_flo = flo.flatten(2).permute(2, 0, 1).contiguous()
# [H*W, B, 512]
if not vis:
f_img, f_flo = self.transformer[stage](f_img, f_flo, words, pos_embed_img, pos_embed_txt, key_padding_mask_img,
key_padding_mask_txt)
else:
f_img, f_flo, vis_map = self.transformer[stage](f_img, f_flo, words, pos_embed_img, pos_embed_txt, key_padding_mask_img,
key_padding_mask_txt, True)
f_img = f_img.permute(1, 2, 0).contiguous()
f_flo = f_flo.permute(1, 2, 0).contiguous()
f_img = f_img.reshape(B, C, H, W)
f_flo = f_flo.reshape(B, C, H, W)
if not vis:
return f_img, f_flo
else:
return f_img, f_flo, vis_map
def forward(self, image, flow, phrase, phrase_mask, img_mask, vis=False):
# image: [B, 3, H, W]
# flow: [B, 3, H, W]
# phrase: [B, N, 300]
# phrase_mask: [B, N]
# img_mask: [B, 100]
if vis:
vis_dict = {}
f_text = self.text_encoder(phrase)
# [B, N, 512]
sent = f_text.sum(1, keepdim=True)
# [B, 1, 512]
words = f_text.permute(1, 0, 2).contiguous()
# [N, B, 512]
spa_feats = []
tem_feats = []
x1 = self.conv1_1(image)
x1 = self.bn1_1(x1)
x2 = self.conv1_2(flow)
x2 = self.bn1_2(x2)
<filename>tests/test_derivatives.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
from eemeter.caltrack.design_matrices import (
create_caltrack_daily_design_matrix,
create_caltrack_billing_design_matrix,
create_caltrack_hourly_preliminary_design_matrix,
create_caltrack_hourly_segmented_design_matrices,
)
from eemeter.caltrack.hourly import fit_caltrack_hourly_model
from eemeter.caltrack.usage_per_day import fit_caltrack_usage_per_day_model
from eemeter.derivatives import metered_savings, modeled_savings
from eemeter.exceptions import MissingModelParameterError
from eemeter.features import estimate_hour_of_week_occupancy, fit_temperature_bins
from eemeter.segmentation import segment_time_series
from eemeter.transform import get_baseline_data, get_reporting_data
@pytest.fixture
def baseline_data_daily(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_daily["blackout_start_date"]
baseline_meter_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date
)
baseline_data = create_caltrack_daily_design_matrix(
baseline_meter_data, temperature_data
)
return baseline_data
@pytest.fixture
def baseline_model_daily(baseline_data_daily):
model_results = fit_caltrack_usage_per_day_model(baseline_data_daily)
return model_results
@pytest.fixture
def reporting_data_daily(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
blackout_end_date = il_electricity_cdd_hdd_daily["blackout_end_date"]
reporting_meter_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date
)
reporting_data = create_caltrack_daily_design_matrix(
reporting_meter_data, temperature_data
)
return reporting_data
@pytest.fixture
def reporting_model_daily(reporting_data_daily):
model_results = fit_caltrack_usage_per_day_model(reporting_data_daily)
return model_results
@pytest.fixture
def reporting_meter_data_daily():
index = pd.date_range("2011-01-01", freq="D", periods=60, tz="UTC")
return pd.DataFrame({"value": 1}, index=index)
@pytest.fixture
def reporting_temperature_data():
index = pd.date_range("2011-01-01", freq="D", periods=60, tz="UTC")
return pd.Series(np.arange(30.0, 90.0), index=index).asfreq("H").ffill()
def test_metered_savings_cdd_hdd_daily(
baseline_model_daily, reporting_meter_data_daily, reporting_temperature_data
):
results, error_bands = metered_savings(
baseline_model_daily, reporting_meter_data_daily, reporting_temperature_data
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 1571.28
assert sorted(error_bands.keys()) == [
"FSU Error Band",
"OLS Error Band",
"OLS Error Band: Model Error",
"OLS Error Band: Noise",
]
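# Workflow sketch (mirrors the fixtures above, hypothetical variable names): a CalTRACK
# usage-per-day model fit on baseline data is compared against observed reporting-period
# meter data to produce metered savings and error bands.
#
#     design = create_caltrack_daily_design_matrix(baseline_meter_data, temperature_data)
#     model = fit_caltrack_usage_per_day_model(design)
#     results, error_bands = metered_savings(model, reporting_meter_data, temperature_data)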
@pytest.fixture
def baseline_model_billing(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_billing_monthly["blackout_start_date"]
baseline_meter_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date
)
baseline_data = create_caltrack_billing_design_matrix(
baseline_meter_data, temperature_data
)
model_results = fit_caltrack_usage_per_day_model(
baseline_data, use_billing_presets=True, weights_col="n_days_kept"
)
return model_results
@pytest.fixture
def reporting_model_billing(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data.value = meter_data.value - 50
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_billing_monthly["blackout_start_date"]
baseline_meter_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date
)
baseline_data = create_caltrack_billing_design_matrix(
baseline_meter_data, temperature_data
)
model_results = fit_caltrack_usage_per_day_model(
baseline_data, use_billing_presets=True, weights_col="n_days_kept"
)
return model_results
@pytest.fixture
def reporting_meter_data_billing():
index = pd.date_range("2011-01-01", freq="MS", periods=13, tz="UTC")
return pd.DataFrame({"value": 1}, index=index)
def test_metered_savings_cdd_hdd_billing(
baseline_model_billing, reporting_meter_data_billing, reporting_temperature_data
):
results, error_bands = metered_savings(
baseline_model_billing, reporting_meter_data_billing, reporting_temperature_data
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 1625.73
assert sorted(error_bands.keys()) == [
"FSU Error Band",
"OLS Error Band",
"OLS Error Band: Model Error",
"OLS Error Band: Noise",
]
def test_metered_savings_cdd_hdd_billing_no_reporting_data(
baseline_model_billing, reporting_meter_data_billing, reporting_temperature_data
):
results, error_bands = metered_savings(
baseline_model_billing,
reporting_meter_data_billing[:0],
reporting_temperature_data,
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 0.0
assert error_bands is None
def test_metered_savings_cdd_hdd_billing_single_record_reporting_data(
baseline_model_billing, reporting_meter_data_billing, reporting_temperature_data
):
results, error_bands = metered_savings(
baseline_model_billing,
reporting_meter_data_billing[:1],
reporting_temperature_data,
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 0.0
assert error_bands is None
@pytest.fixture
def baseline_model_billing_single_record_baseline_data(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_billing_monthly["blackout_start_date"]
baseline_meter_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date
)
baseline_data = create_caltrack_billing_design_matrix(
baseline_meter_data, temperature_data
)
baseline_data = baseline_data[:2]
model_results = fit_caltrack_usage_per_day_model(
baseline_data, use_billing_presets=True, weights_col="n_days_kept"
)
return model_results
def test_metered_savings_cdd_hdd_billing_single_record_baseline_data(
baseline_model_billing_single_record_baseline_data,
reporting_meter_data_billing,
reporting_temperature_data,
):
results, error_bands = metered_savings(
baseline_model_billing_single_record_baseline_data,
reporting_meter_data_billing,
reporting_temperature_data,
)
"""
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 1625.73
assert sorted(error_bands.keys()) == [
"FSU Error Band",
"OLS Error Band",
"OLS Error Band: Model Error",
"OLS Error Band: Noise",
]
"""
@pytest.fixture
def reporting_meter_data_billing_wrong_timestamp():
index = pd.date_range("2003-01-01", freq="MS", periods=13, tz="UTC")
return pd.DataFrame({"value": 1}, index=index)
def test_metered_savings_cdd_hdd_billing_reporting_data_wrong_timestamp(
baseline_model_billing,
reporting_meter_data_billing_wrong_timestamp,
reporting_temperature_data,
):
results, error_bands = metered_savings(
baseline_model_billing,
reporting_meter_data_billing_wrong_timestamp,
reporting_temperature_data,
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 0.0
assert error_bands is None
def test_metered_savings_cdd_hdd_daily_hourly_degree_days(
baseline_model_daily, reporting_meter_data_daily, reporting_temperature_data
):
results, error_bands = metered_savings(
baseline_model_daily, reporting_meter_data_daily, reporting_temperature_data
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 1571.28
assert round(error_bands["FSU Error Band"], 2) == 601.52
def test_metered_savings_cdd_hdd_no_params(
baseline_model_daily, reporting_meter_data_daily, reporting_temperature_data
):
baseline_model_daily.model.model_params = None
with pytest.raises(MissingModelParameterError):
metered_savings(
baseline_model_daily, reporting_meter_data_daily, reporting_temperature_data
)
def test_metered_savings_cdd_hdd_daily_with_disaggregated(
baseline_model_daily, reporting_meter_data_daily, reporting_temperature_data
):
results, error_bands = metered_savings(
baseline_model_daily,
reporting_meter_data_daily,
reporting_temperature_data,
with_disaggregated=True,
)
assert list(sorted(results.columns)) == [
"counterfactual_base_load",
"counterfactual_cooling_load",
"counterfactual_heating_load",
"counterfactual_usage",
"metered_savings",
"reporting_observed",
]
assert round(error_bands["FSU Error Band"], 2) == 601.52
def test_modeled_savings_cdd_hdd_daily(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily,
reporting_temperature_data,
):
# using reporting data for convenience, but intention is to use normal data
results, error_bands = modeled_savings(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily.index,
reporting_temperature_data,
)
assert list(results.columns) == [
"modeled_baseline_usage",
"modeled_reporting_usage",
"modeled_savings",
]
assert round(results.modeled_savings.sum(), 2) == 168.58
assert round(error_bands["FSU Error Band: Baseline"], 2) == 601.52
assert round(error_bands["FSU Error Band: Reporting"], 2) == 534.78
assert round(error_bands["FSU Error Band"], 2) == 804.87
def test_modeled_savings_cdd_hdd_daily_hourly_degree_days(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily,
reporting_temperature_data,
):
# using reporting data for convenience, but intention is to use normal data
results, error_bands = modeled_savings(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily.index,
reporting_temperature_data,
predict_kwargs={"degree_day_method": "hourly"},
)
assert list(results.columns) == [
"modeled_baseline_usage",
"modeled_reporting_usage",
"modeled_savings",
]
assert round(results.modeled_savings.sum(), 2) == 168.58
assert round(error_bands["FSU Error Band: Baseline"], 2) == 601.52
assert round(error_bands["FSU Error Band: Reporting"], 2) == 534.78
assert round(error_bands["FSU Error Band"], 2) == 804.87
def test_modeled_savings_cdd_hdd_daily_baseline_model_no_params(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily,
reporting_temperature_data,
):
baseline_model_daily.model.model_params = None
with pytest.raises(MissingModelParameterError):
modeled_savings(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily.index,
reporting_temperature_data,
)
def test_modeled_savings_cdd_hdd_daily_reporting_model_no_params(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily,
reporting_temperature_data,
):
reporting_model_daily.model.model_params = None
with pytest.raises(MissingModelParameterError):
modeled_savings(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily.index,
reporting_temperature_data,
)
def test_modeled_savings_cdd_hdd_daily_with_disaggregated(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily,
reporting_temperature_data,
):
# using reporting data for convenience, but intention is to use normal data
results, error_bands = modeled_savings(
baseline_model_daily,
reporting_model_daily,
reporting_meter_data_daily.index,
reporting_temperature_data,
with_disaggregated=True,
)
assert list(sorted(results.columns)) == [
"modeled_base_load_savings",
"modeled_baseline_base_load",
"modeled_baseline_cooling_load",
"modeled_baseline_heating_load",
"modeled_baseline_usage",
"modeled_cooling_load_savings",
"modeled_heating_load_savings",
"modeled_reporting_base_load",
"modeled_reporting_cooling_load",
"modeled_reporting_heating_load",
"modeled_reporting_usage",
"modeled_savings",
]
assert round(error_bands["FSU Error Band: Baseline"], 2) == 601.52
assert round(error_bands["FSU Error Band: Reporting"], 2) == 534.78
assert round(error_bands["FSU Error Band"], 2) == 804.87
def test_modeled_savings_daily_empty_temperature_data(
baseline_model_daily, reporting_model_daily
):
index = pd.DatetimeIndex([], tz="UTC", name="dt", freq="H")
temperature_data = pd.Series([], index=index)
meter_data_index = temperature_data.resample("D").sum().index
# using reporting data for convenience, but intention is to use normal data
results, error_bands = modeled_savings(
baseline_model_daily, reporting_model_daily, meter_data_index, temperature_data
)
assert results.shape == (0, 3)
assert list(results.columns) == [
"modeled_baseline_usage",
"modeled_reporting_usage",
"modeled_savings",
]
assert error_bands is None
@pytest.fixture
def baseline_model_hourly(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_meter_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date
)
preliminary_hourly_design_matrix = create_caltrack_hourly_preliminary_design_matrix(
baseline_meter_data, temperature_data
)
segmentation = segment_time_series(
preliminary_hourly_design_matrix.index, "three_month_weighted"
)
occupancy_lookup = estimate_hour_of_week_occupancy(
preliminary_hourly_design_matrix, segmentation=segmentation
)
occupied_temperature_bins, unoccupied_temperature_bins = fit_temperature_bins(
preliminary_hourly_design_matrix,
segmentation=segmentation,
occupancy_lookup=occupancy_lookup,
)
design_matrices = create_caltrack_hourly_segmented_design_matrices(
preliminary_hourly_design_matrix,
segmentation,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
segmented_model = fit_caltrack_hourly_model(
design_matrices,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
return segmented_model
@pytest.fixture
def reporting_model_hourly(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_meter_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date
)
preliminary_hourly_design_matrix = create_caltrack_hourly_preliminary_design_matrix(
reporting_meter_data, temperature_data
)
segmentation = segment_time_series(
preliminary_hourly_design_matrix.index, "three_month_weighted"
)
occupancy_lookup = estimate_hour_of_week_occupancy(
preliminary_hourly_design_matrix, segmentation=segmentation
)
occupied_temperature_bins, unoccupied_temperature_bins = fit_temperature_bins(
preliminary_hourly_design_matrix,
segmentation=segmentation,
occupancy_lookup=occupancy_lookup,
)
design_matrices = create_caltrack_hourly_segmented_design_matrices(
preliminary_hourly_design_matrix,
segmentation,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
segmented_model = fit_caltrack_hourly_model(
design_matrices,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
return segmented_model
@pytest.fixture
def reporting_meter_data_hourly():
index = pd.date_range("2011-01-01", freq="D", periods=60, tz="UTC")
return pd.DataFrame({"value": 1}, index=index).asfreq("H").ffill()
def test_metered_savings_cdd_hdd_hourly(
baseline_model_hourly, reporting_meter_data_hourly, reporting_temperature_data
):
results, error_bands = metered_savings(
baseline_model_hourly, reporting_meter_data_hourly, reporting_temperature_data
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == -403.7
assert error_bands is None
def test_modeled_savings_cdd_hdd_hourly(
baseline_model_hourly,
reporting_model_hourly,
reporting_meter_data_hourly,
reporting_temperature_data,
):
# using reporting data for convenience, but intention is to use normal data
results, error_bands = modeled_savings(
baseline_model_hourly,
reporting_model_hourly,
reporting_meter_data_hourly.index,
reporting_temperature_data,
)
assert list(results.columns) == [
"modeled_baseline_usage",
"modeled_reporting_usage",
"modeled_savings",
]
assert round(results.modeled_savings.sum(), 2) == 55.3
assert error_bands is None
@pytest.fixture
def normal_year_temperature_data():
index = pd.date_range("2015-01-01", freq="D", periods=365, tz="UTC")
np.random.seed(0)
return pd.Series(np.random.rand(365) * 30 + 45, index=index).asfreq("H").ffill()
def test_modeled_savings_cdd_hdd_billing(
baseline_model_billing, reporting_model_billing, normal_year_temperature_data
):
results, error_bands = modeled_savings(
baseline_model_billing,
reporting_model_billing,
pd.date_range("2015-01-01", freq="D", periods=365, tz="UTC"),
normal_year_temperature_data,
)
assert list(results.columns) == [
"modeled_baseline_usage",
"modeled_reporting_usage",
"modeled_savings",
]
assert round(results.modeled_savings.sum(), 2) == 587.44
assert sorted(error_bands.keys()) == [
"FSU Error Band",
"FSU Error Band: Baseline",
"FSU Error Band: Reporting",
]
assert round(error_bands["FSU Error Band"], 2) == 156.89
@pytest.fixture
def reporting_meter_data_billing_not_aligned():
index = pd.date_range("2001-01-01", freq="MS", periods=13, tz="UTC")
return pd.DataFrame({"value": None}, index=index)
def test_metered_savings_not_aligned_reporting_data(
baseline_model_billing,
reporting_meter_data_billing_not_aligned,
reporting_temperature_data,
):
results, error_bands = metered_savings(
baseline_model_billing,
reporting_meter_data_billing_not_aligned,
reporting_temperature_data,
)
assert list(results.columns) == [
"reporting_observed",
"counterfactual_usage",
"metered_savings",
]
assert round(results.metered_savings.sum(), 2) == 0.0
assert error_bands is None
@pytest.fixture
def baseline_model_billing_single_record(il_electricity_cdd_hdd_billing_monthly):
baseline_meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"][-2:]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
blackout_start_date = il_electricity_cdd_hdd_billing_monthly["blackout_start_date"]
baseline_data = create_caltrack_billing_design_matrix(
baseline_meter_data, temperature_data
)
model_results = fit_caltrack_usage_per_day_model(
baseline_data, use_billing_presets=True, weights_col="n_days_kept"
)
return model_results
def test_metered_savings_model_single_record(
baseline_model_billing_single_record,
reporting_meter_data_billing,
reporting_temperature_data,
):
assert pd.isnull(baseline_model_billing_single_record.totals_metrics.autocorr_resid)
# simulating deserialization
baseline_model_billing_single_record.totals_metrics.autocorr_resid = None
results, error_bands = metered_savings(
baseline_model_billing_single_record,
reporting_meter_data_billing,
reporting_temperature_data,
| |
"""
TransitionMap and derived classes.
"""
import numpy as np
from importlib_resources import path
from numpy import array
from flatland.core.grid.grid4 import Grid4Transitions
from flatland.core.grid.grid4_utils import get_new_position, get_direction
from flatland.core.grid.grid_utils import IntVector2DArray, IntVector2D
from flatland.core.grid.grid_utils import Vec2dOperations as Vec2d
from flatland.core.grid.rail_env_grid import RailEnvTransitions
from flatland.core.transitions import Transitions
from flatland.utils.ordered_set import OrderedSet
# TODO are these general classes or for grid4 only?
class TransitionMap:
"""
Base TransitionMap class.
Generic class that implements a collection of transitions over a set of
cells.
"""
def get_transitions(self, cell_id):
"""
Return a tuple of transitions available in a cell specified by
`cell_id` (e.g., a tuple whose size is the maximum number of transitions,
with values 0 or 1, or potentially in between,
for stochastic transitions).
Parameters
----------
cell_id : [cell identifier]
The cell_id object depends on the specific implementation.
It generally is an int (e.g., an index) or a tuple of indices.
Returns
-------
tuple
List of the validity of transitions in the cell.
"""
raise NotImplementedError()
def set_transitions(self, cell_id, new_transitions):
"""
Replaces the available transitions in cell `cell_id` with the tuple
`new_transitions`. `new_transitions` must have
one element for each possible transition.
Parameters
----------
cell_id : [cell identifier]
The cell_id object depends on the specific implementation.
It generally is an int (e.g., an index) or a tuple of indices.
new_transitions : tuple
Tuple with the validity of the new transitions for the cell.
"""
raise NotImplementedError()
def get_transition(self, cell_id, transition_index):
"""
Return the status of whether an agent in cell `cell_id` can perform a
movement along transition `transition_index` (e.g., the NESW direction
of movement, for agents on a grid).
Parameters
----------
cell_id : [cell identifier]
The cell_id object depends on the specific implementation.
It generally is an int (e.g., an index) or a tuple of indices.
transition_index : int
Index of the transition to probe, as index in the tuple returned by
get_transitions(). e.g., the NESW direction of movement, for agents
on a grid.
Returns
-------
int or float (depending on Transitions used)
Validity of the requested transition (e.g.,
0/1 allowed/not allowed, a probability in [0,1], etc...)
"""
raise NotImplementedError()
def set_transition(self, cell_id, transition_index, new_transition):
"""
Replaces the validity of the transition `transition_index` in cell
`cell_id` with the new value `new_transition`.
Parameters
----------
cell_id : [cell identifier]
The cell_id object depends on the specific implementation.
It generally is an int (e.g., an index) or a tuple of indices.
transition_index : int
Index of the transition to probe, as index in the tuple returned by
get_transitions(). e.g., the NESW direction of movement, for agents
on a grid.
new_transition : int or float (depending on Transitions used)
Validity of the requested transition (e.g.,
0/1 allowed/not allowed, a probability in [0,1], etc...)
"""
raise NotImplementedError()
class GridTransitionMap(TransitionMap):
"""
Implements a TransitionMap over a 2D grid.
GridTransitionMap implements utility functions.
"""
def __init__(self, width, height, transitions: Transitions = Grid4Transitions([]), random_seed=None):
"""
Builder for GridTransitionMap object.
Parameters
----------
width : int
Width of the grid.
height : int
Height of the grid.
transitions : Transitions object
The Transitions object to use to encode/decode transitions over the
grid.
"""
self.width = width
self.height = height
self.transitions = transitions
self.random_generator = np.random.RandomState()
if random_seed is None:
self.random_generator.seed(12)
else:
self.random_generator.seed(random_seed)
self.grid = np.zeros((height, width), dtype=self.transitions.get_type())
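# Minimal usage sketch (hypothetical sizes and values; not part of the original
# module), assuming the 16-bit RailEnvTransitions encoding imported above:
#
#   gtm = GridTransitionMap(width=10, height=10, transitions=RailEnvTransitions())
#   gtm.grid.shape                    # -> (10, 10), indexed as grid[row][column]
#   gtm.set_transitions((2, 3), 0)    # a 2-tuple cell_id replaces the full cell content
#   gtm.get_full_transitions(2, 3)    # -> 0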
def get_full_transitions(self, row, column):
"""
Returns the full transition value for the cell at (row, column), encoded in the format of this transition map's Transitions object.
Parameters
----------
row: int
column: int
(row,column) specifies the cell in this transition map.
Returns
-------
self.transitions.get_type()
The cell content in the format of this map's Transitions.
"""
return self.grid[row][column]
def get_transitions(self, row, column, orientation):
"""
Return a tuple of transitions available in the cell at (row, column) for
an agent facing `orientation` (e.g., a tuple whose size is the maximum
number of transitions, with values 0 or 1, or potentially in between,
for stochastic transitions).
Parameters
----------
row: int
column: int
orientation: int
(row, column) specifies the cell in this transition map; orientation is
the direction the agent is facing within the cell.
Returns
-------
tuple
List of the validity of transitions in the cell, as given by the map's Transitions.
"""
return self.transitions.get_transitions(self.grid[row][column], orientation)
def set_transitions(self, cell_id, new_transitions):
"""
Replaces the available transitions in cell `cell_id` with the tuple
`new_transitions`. `new_transitions` must have
one element for each possible transition.
Parameters
----------
cell_id : tuple
The cell_id tuple indexes a cell as (row, column, orientation),
where orientation is the direction an agent is facing within a cell.
Alternatively, it can be given as (row, column) to replace the
full cell content.
new_transitions : tuple
Tuple with the validity of the new transitions for the cell.
"""
assert len(cell_id) in (2, 3), \
'GridTransitionMap.set_transitions() ERROR: cell_id tuple must have length 2 or 3.'
if len(cell_id) == 3:
self.grid[cell_id[0]][cell_id[1]] = self.transitions.set_transitions(self.grid[cell_id[0]][cell_id[1]],
cell_id[2],
new_transitions)
elif len(cell_id) == 2:
self.grid[cell_id[0]][cell_id[1]] = new_transitions
def get_transition(self, cell_id, transition_index):
"""
Return the status of whether an agent in cell `cell_id` can perform a
movement along transition `transition_index` (e.g., the NESW direction
of movement, for agents on a grid).
Parameters
----------
cell_id : tuple
The cell_id tuple indexes a cell as (row, column, orientation),
where orientation is the direction an agent is facing within a cell.
transition_index : int
Index of the transition to probe, as index in the tuple returned by
get_transitions(). e.g., the NESW direction of movement, for agents
on a grid.
Returns
-------
int or float (depending on the Transitions used in the map)
Validity of the requested transition (e.g.,
0/1 allowed/not allowed, a probability in [0,1], etc...)
"""
assert len(cell_id) == 3, \
'GridTransitionMap.get_transition() ERROR: cell_id tuple must have length 3.'
return self.transitions.get_transition(self.grid[cell_id[0]][cell_id[1]], cell_id[2], transition_index)
def set_transition(self, cell_id, transition_index, new_transition, remove_deadends=False):
"""
Replaces the validity of the transition `transition_index` in cell
`cell_id` with the new value `new_transition`.
Parameters
----------
cell_id : tuple
The cell_id tuple indexes a cell as (row, column, orientation),
where orientation is the direction an agent is facing within a cell.
transition_index : int
Index of the transition to probe, as index in the tuple returned by
get_transitions(). e.g., the NESW direction of movement, for agents
on a grid.
new_transition : int or float (depending on the Transitions used in the map)
Validity of the requested transition (e.g.,
0/1 allowed/not allowed, a probability in [0,1], etc...)
"""
assert len(cell_id) == 3, \
'GridTransitionMap.set_transition() ERROR: cell_id tuple must have length 3.'
self.grid[cell_id[0]][cell_id[1]] = self.transitions.set_transition(
self.grid[cell_id[0]][cell_id[1]],
cell_id[2],
transition_index,
new_transition,
remove_deadends)
def save_transition_map(self, filename):
"""
Save the transitions grid as `filename`, in npy format.
Parameters
----------
filename : string
Name of the file to which to save the transitions grid.
"""
np.save(filename, self.grid)
def load_transition_map(self, package, resource):
"""
Load the transitions grid from the packaged resource `resource` in `package` (npy format).
The load function only updates the transitions grid, and possibly width and height, but the object has to be
initialized with the correct `transitions` object anyway.
Parameters
----------
package : string
Name of the package from which to load the transitions grid.
resource : string
Name of the file from which to load the transitions grid within the package.
The width and height of this GridTransitionMap are always replaced with the size of the grid loaded
from the resource.
"""
with path(package, resource) as file_in:
new_grid = np.load(file_in)
new_height = new_grid.shape[0]
new_width = new_grid.shape[1]
self.width = new_width
self.height = new_height
self.grid = new_grid
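# Minimal usage sketch (hypothetical package/resource names): the grid size is
# taken from the loaded array, so the initial width/height do not matter here.
#
#   gtm = GridTransitionMap(0, 0, RailEnvTransitions())
#   gtm.load_transition_map('my_maps_package', 'simple_network.npy')
#   gtm.height, gtm.width             # -> shape of the loaded grid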
def is_dead_end(self, rcPos: IntVector2DArray):
"""
Check if the cell is a dead-end.
Parameters
----------
rcPos: Tuple[int,int]
tuple(row, column) with grid coordinate
Returns
-------
boolean
True if and only if the cell is a dead-end.
"""
nbits = 0
tmp = self.get_full_transitions(rcPos[0], rcPos[1])
while tmp > 0:
nbits += (tmp & 1)
tmp = tmp >> 1
return nbits == 1
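# Worked example of the bit count above (illustrative values): a cell whose
# 16-bit transition value has exactly one bit set, e.g. 0b0000000000010000,
# allows a single movement and is therefore a dead-end, while a value such as
# 0b1000000000010000 (two bits set) is not.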
def is_simple_turn(self, rcPos: IntVector2DArray):
"""
Check if the cell is a simple turn.
import difflib
import logging
from typing import List, Optional, Set
from irrd.conf import get_setting
from irrd.rpki.status import RPKIStatus
from irrd.rpki.validators import SingleRouteROAValidator
from irrd.rpsl.parser import UnknownRPSLObjectClassException, RPSLObject
from irrd.rpsl.rpsl_objects import rpsl_object_from_text, RPSLMntner
from irrd.scopefilter.status import ScopeFilterStatus
from irrd.scopefilter.validators import ScopeFilterValidator
from irrd.storage.database_handler import DatabaseHandler
from irrd.storage.models import JournalEntryOrigin
from irrd.storage.queries import RPSLDatabaseQuery
from irrd.utils.text import splitline_unicodesafe
from .parser_state import UpdateRequestType, UpdateRequestStatus
from .validators import ReferenceValidator, AuthValidator
logger = logging.getLogger(__name__)
class ChangeRequest:
"""
A ChangeRequest tracks and processes a request for a single change.
In this context, a change can be creating, modifying or deleting an
RPSL object.
"""
rpsl_text_submitted: str
rpsl_obj_new: Optional[RPSLObject]
rpsl_obj_current: Optional[RPSLObject] = None
status = UpdateRequestStatus.PROCESSING
request_type: Optional[UpdateRequestType] = None
mntners_notify: List[RPSLMntner]
error_messages: List[str]
info_messages: List[str]
def __init__(self, rpsl_text_submitted: str, database_handler: DatabaseHandler, auth_validator: AuthValidator,
reference_validator: ReferenceValidator, delete_reason: Optional[str] = None) -> None:
"""
Initialise a new change request for a single RPSL object.
:param rpsl_text_submitted: the object text
:param database_handler: a DatabaseHandler instance
:param auth_validator: a AuthValidator instance, to resolve authentication requirements
:param reference_validator: a ReferenceValidator instance, to resolve references between objects
:param delete_reason: a string with the deletion reason, if this was a deletion request
The rpsl_text passed into this function should be cleaned from any
meta attributes like delete/override/password. Those should be passed
into this method as delete_reason, or provided to the AuthValidator.
The auth_validator and reference_validator must be shared between
different instances, to benefit from caching, and to resolve references
between different objects that are part of the same submission with
possibly multiple changes.
"""
self.database_handler = database_handler
self.auth_validator = auth_validator
self.reference_validator = reference_validator
self.rpsl_text_submitted = rpsl_text_submitted
self.mntners_notify = []
self.used_override = False
self._cached_roa_validity: Optional[bool] = None
self.roa_validator = SingleRouteROAValidator(database_handler)
self.scopefilter_validator = ScopeFilterValidator()
try:
self.rpsl_obj_new = rpsl_object_from_text(rpsl_text_submitted, strict_validation=True)
if self.rpsl_obj_new.messages.errors():
self.status = UpdateRequestStatus.ERROR_PARSING
self.error_messages = self.rpsl_obj_new.messages.errors()
self.info_messages = self.rpsl_obj_new.messages.infos()
logger.debug(f'{id(self)}: Processing new ChangeRequest for object {self.rpsl_obj_new}: request {id(self)}')
except UnknownRPSLObjectClassException as exc:
self.rpsl_obj_new = None
self.request_type = None
self.status = UpdateRequestStatus.ERROR_UNKNOWN_CLASS
self.info_messages = []
self.error_messages = [str(exc)]
if self.is_valid() and self.rpsl_obj_new:
source = self.rpsl_obj_new.source()
if not get_setting(f'sources.{source}.authoritative'):
logger.debug(f'{id(self)}: change is for non-authoritative source {source}, rejected')
self.error_messages.append(f'This instance is not authoritative for source {source}')
self.status = UpdateRequestStatus.ERROR_NON_AUTHORITIVE
return
self._retrieve_existing_version()
if delete_reason:
self.request_type = UpdateRequestType.DELETE
if not self.rpsl_obj_current:
self.status = UpdateRequestStatus.ERROR_PARSING
self.error_messages.append('Can not delete object: no object found for this key in this database.')
logger.debug(f'{id(self)}: Request attempts to delete object {self.rpsl_obj_new}, '
f'but no existing object found.')
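# Minimal construction sketch (all collaborators are hypothetical/mocked here);
# a request is typically validated and then saved:
#
#   req = ChangeRequest(rpsl_text, database_handler=dh, auth_validator=av,
#                       reference_validator=rv, delete_reason=None)
#   if req.validate():
#       req.save(dh)
#   print(req.submitter_report())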
def _retrieve_existing_version(self):
"""
Retrieve the current version of this object, if any, and store it in rpsl_obj_current.
Update self.status appropriately.
"""
query = RPSLDatabaseQuery().sources([self.rpsl_obj_new.source()])
query = query.object_classes([self.rpsl_obj_new.rpsl_object_class]).rpsl_pk(self.rpsl_obj_new.pk())
results = list(self.database_handler.execute_query(query))
if not results:
self.request_type = UpdateRequestType.CREATE
logger.debug(f'{id(self)}: Did not find existing version for object {self.rpsl_obj_new}, request is CREATE')
elif len(results) == 1:
self.request_type = UpdateRequestType.MODIFY
self.rpsl_obj_current = rpsl_object_from_text(results[0]['object_text'], strict_validation=False)
logger.debug(f'{id(self)}: Retrieved existing version for object '
f'{self.rpsl_obj_current}, request is MODIFY/DELETE')
else: # pragma: no cover
# This should not be possible, as rpsl_pk/source are a composite unique value in the database scheme.
# Therefore, a query should not be able to affect more than one row.
affected_pks = ', '.join([r['pk'] for r in results])
msg = f'{id(self)}: Attempted to retrieve current version of object {self.rpsl_obj_new.pk()}/'
msg += f'{self.rpsl_obj_new.source()}, but multiple '
msg += f'objects were found, internal pks found: {affected_pks}'
logger.error(msg)
raise ValueError(msg)
def save(self, database_handler: DatabaseHandler) -> None:
"""Save the change to the database."""
if self.status != UpdateRequestStatus.PROCESSING or not self.rpsl_obj_new:
raise ValueError('ChangeRequest can only be saved in status PROCESSING')
if self.request_type == UpdateRequestType.DELETE and self.rpsl_obj_current is not None:
logger.info(f'{id(self)}: Saving change for {self.rpsl_obj_new}: deleting current object')
database_handler.delete_rpsl_object(rpsl_object=self.rpsl_obj_current, origin=JournalEntryOrigin.auth_change)
else:
logger.info(f'{id(self)}: Saving change for {self.rpsl_obj_new}: inserting/updating current object')
database_handler.upsert_rpsl_object(self.rpsl_obj_new, JournalEntryOrigin.auth_change)
self.status = UpdateRequestStatus.SAVED
def is_valid(self):
return self.status in [UpdateRequestStatus.SAVED, UpdateRequestStatus.PROCESSING]
def submitter_report(self) -> str:
"""Produce a string suitable for reporting back status and messages to the human submitter."""
status = 'succeeded' if self.is_valid() else 'FAILED'
report = f'{self.request_type_str().title()} {status}: [{self.object_class_str()}] {self.object_pk_str()}\n'
if self.info_messages or self.error_messages:
if not self.rpsl_obj_new or self.error_messages:
report += '\n' + self.rpsl_text_submitted + '\n'
else:
report += '\n' + self.rpsl_obj_new.render_rpsl_text() + '\n'
report += ''.join([f'ERROR: {e}\n' for e in self.error_messages])
report += ''.join([f'INFO: {e}\n' for e in self.info_messages])
return report
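# Illustrative report layout (object class and key are hypothetical):
#
#   Modify succeeded: [route] 192.0.2.0/24AS65530
#
#   <rendered or submitted RPSL text>
#   ERROR: ...
#   INFO: ...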
def notification_target_report(self):
"""
Produce a string suitable for reporting back status and messages
to a human notification target, i.e. someone listed
in notify/upd-to/mnt-nfy.
"""
if not self.is_valid() and self.status != UpdateRequestStatus.ERROR_AUTH:
raise ValueError('Notification reports can only be made for changes that are valid '
'or have failed authorisation.')
status = 'succeeded' if self.is_valid() else 'FAILED AUTHORISATION'
report = f'{self.request_type_str().title()} {status} for object below: '
report += f'[{self.object_class_str()}] {self.object_pk_str()}:\n\n'
if self.request_type == UpdateRequestType.MODIFY:
current_text = list(splitline_unicodesafe(self.rpsl_obj_current.render_rpsl_text()))
new_text = list(splitline_unicodesafe(self.rpsl_obj_new.render_rpsl_text()))
diff = list(difflib.unified_diff(current_text, new_text, lineterm=''))
report += '\n'.join(diff[2:]) # skip the lines from the diff which would have filenames
if self.status == UpdateRequestStatus.ERROR_AUTH:
report += '\n\n*Rejected* new version of this object:\n\n'
else:
report += '\n\nNew version of this object:\n\n'
if self.request_type == UpdateRequestType.DELETE:
report += self.rpsl_obj_current.render_rpsl_text()
else:
report += self.rpsl_obj_new.render_rpsl_text()
return report
def request_type_str(self) -> str:
return self.request_type.value if self.request_type else 'request'
def object_pk_str(self) -> str:
return self.rpsl_obj_new.pk() if self.rpsl_obj_new else '(unreadable object key)'
def object_class_str(self) -> str:
return self.rpsl_obj_new.rpsl_object_class if self.rpsl_obj_new else '(unreadable object class)'
def notification_targets(self) -> Set[str]:
"""
Produce a set of e-mail addresses that should be notified
about the change to this object.
May include the mntners' upd-to or mnt-nfy addresses, and the notify addresses of an existing object.
"""
targets: Set[str] = set()
status_qualifies_notification = self.is_valid() or self.status == UpdateRequestStatus.ERROR_AUTH
if self.used_override or not status_qualifies_notification:
return targets
mntner_attr = 'upd-to' if self.status == UpdateRequestStatus.ERROR_AUTH else 'mnt-nfy'
for mntner in self.mntners_notify:
for email in mntner.parsed_data.get(mntner_attr, []):
targets.add(email)
if self.rpsl_obj_current:
for email in self.rpsl_obj_current.parsed_data.get('notify', []):
targets.add(email)
return targets
def validate(self) -> bool:
auth_valid = self._check_auth()
if not auth_valid:
return False
references_valid = self._check_references()
rpki_valid = self._check_conflicting_roa()
scopefilter_valid = self._check_scopefilter()
return references_valid and rpki_valid and scopefilter_valid
def _check_auth(self) -> bool:
assert self.rpsl_obj_new
auth_result = self.auth_validator.process_auth(self.rpsl_obj_new, self.rpsl_obj_current)
self.info_messages += auth_result.info_messages
self.mntners_notify = auth_result.mntners_notify
if not auth_result.is_valid():
self.status = UpdateRequestStatus.ERROR_AUTH
self.error_messages += auth_result.error_messages
logger.debug(f'{id(self)}: Authentication check failed: {auth_result.error_messages}')
return False
self.used_override = auth_result.used_override
logger.debug(f'{id(self)}: Authentication check succeeded')
return True
def _check_references(self) -> bool:
"""
Check all references from this object to or from other objects.
For deletions, only references to the deleted object matter, as
they now become invalid. For other operations, only the validity
of references from the new object to others matter.
"""
if self.request_type == UpdateRequestType.DELETE and self.rpsl_obj_current is not None:
assert self.rpsl_obj_new
references_result = self.reference_validator.check_references_from_others(self.rpsl_obj_current)
else:
assert self.rpsl_obj_new
references_result = self.reference_validator.check_references_to_others(self.rpsl_obj_new)
self.info_messages += references_result.info_messages
if not references_result.is_valid():
self.error_messages += references_result.error_messages
logger.debug(f'{id(self)}: Reference check failed: {references_result.error_messages}')
if self.is_valid(): # Only change the status if this object was valid prior, so this is the first failure
self.status = UpdateRequestStatus.ERROR_REFERENCE
return False
logger.debug(f'{id(self)}: Reference check succeeded')
return True
def _check_conflicting_roa(self) -> bool:
"""
Check whether there are any conflicting ROAs with the new object.
Result is cached, as validate() may be called multiple times,
but the result of this check will not change. Always returns
True when not in RPKI-aware mode.
"""
assert self.rpsl_obj_new
if self._cached_roa_validity is not None:
return self._cached_roa_validity
if not get_setting('rpki.roa_source') or not self.rpsl_obj_new.rpki_relevant:
return True
# Deletes are permitted for RPKI-invalids, other operations are not
if self.request_type == UpdateRequestType.DELETE:
return True
assert self.rpsl_obj_new.asn_first
validation_result = self.roa_validator.validate_route(
self.rpsl_obj_new.prefix, self.rpsl_obj_new.asn_first, self.rpsl_obj_new.source()
)
if validation_result == RPKIStatus.invalid:
import_timer = get_setting('rpki.roa_import_timer')
user_message = 'RPKI ROAs were found that conflict with this object. '
user_message += f'(This IRRd refreshes ROAs every {import_timer} seconds.)'
logger.debug(f'{id(self)}: Conflicting ROAs found')
if self.is_valid(): # Only change the status if this object was valid prior, so this is the first failure
self.status = UpdateRequestStatus.ERROR_ROA
self.error_messages.append(user_message)
self._cached_roa_validity = False
return False
else:
logger.debug(f'{id(self)}: No conflicting ROAs found')
self._cached_roa_validity = True
return True
def _check_scopefilter(self) -> bool:
if self.request_type == UpdateRequestType.DELETE or not self.rpsl_obj_new:
return True
result, comment = self.scopefilter_validator.validate_rpsl_object(self.rpsl_obj_new)
if result in [ScopeFilterStatus.out_scope_prefix, ScopeFilterStatus.out_scope_as]:
user_message = 'Contains out of scope information: ' + comment
if self.request_type == UpdateRequestType.CREATE:
logger.debug(f'{id(self)}: object out of scope: ' + comment)
if self.is_valid(): # | |
# coding=utf-8
# Copyright (c) 2020 <NAME>
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
python2 -m pydocmk2 build
"""
from __future__ import print_function
from .document import Index
from .preprocessors.pydocmk import pydoc_html
from .imp import import_object, dir_object
import argparse
import atexit
import os
import shutil
import signal
import subprocess
import sys
import yaml
import json
# parser = ArgumentParser()
# parser.add_argument('command', choices=['generate', 'build', 'gh-deploy',
# 'json', 'new', 'serve', 'simple'])
# parser.add_argument('subargs', nargs='...')
def log(*args, **kwargs):
kwargs.setdefault('file', sys.stderr)
print(*args, **kwargs)
class PyDocMk(object):
def __init__(self):
self.args = None
self.command = 'build'
self.clean = False
self.config_dir = os.getcwd()
self.docs_dir = os.path.join(self.config_dir, 'srcdocs')
self.gens_dir = os.path.join(self.config_dir, '_build/mkdocs')
self.pre_dir = None
self.post_dir = None
self.config_path = os.path.join(self.config_dir, 'pydocmk.yml')
self.mkdocs_path = None
self.config = {}
self.mk_config = {}
self.python_path = 'python2'
self.subargs = []
self.config_custom = None
def parse_args(self):
parser = argparse.ArgumentParser(
prog="pydocmk2",
description="document Python 2 modules with MkDocs"
)
parser.add_argument(
"command",
default='build',
choices=['generate', 'build', 'gh-deploy',
'json', 'new', 'serve', 'simple', 'pydoc'],
help="""`simple`: output one .md file, e.g. `simple mypackage+ mypackage.my module+ > docs.md`,
`pydoc`: output the content of the pydocmk preprocessor,
`generate`: generate .md but do not run mkdocs,
`build`: generate .md and build with mkdocs,
`serve`: generate .md, build with mkdocs and run a webserver"""
)
parser.add_argument(
"subargs",
nargs="*",
help="optional arguments passed to mkdocs"
)
parser.add_argument(
"-F",
"--config",
dest="config_custom",
type=str,
help="extra config params as JSON: --config={'key':'value','key':'value'}"
)
parser.add_argument(
"-f",
"--config-file",
default=self.config_path,
type=str,
dest="config_path",
help="pydocmk.yml YAML config file: --config-file=pydocmk.yml"
)
parser.add_argument(
"-m",
"--mkdocs-file",
type=str,
dest="mkdocs_path",
help="mkdocs.yml YAML config file to build/use: --mkdocs-file=mkdocs.yml"
)
parser.add_argument(
"-p",
"--python",
default="python2",
type=str,
dest="python_path",
help="Python executable to run mkdocs: --python=python2"
)
parser.add_argument(
"-c",
"--clean",
dest="clean",
action="store_true"
)
self.args = parser.parse_args()
if self.args.command:
self.command = self.args.command
if self.args.subargs:
self.subargs = self.args.subargs
if self.args.clean:
self.clean = self.args.clean
self.config_path = self.args.config_path # 'pydocmk.yml'
self.config_dir = os.path.dirname(self.config_path)
if self.args.python_path:
self.python_path = self.args.python_path
if self.args.config_custom:
self.config_custom = self.args.config_custom
if self.args.mkdocs_path:
self.mkdocs_path = self.args.mkdocs_path
if self.args.command in ('simple', 'pydoc') and not self.args.subargs:
parser.error('need at least one argument')
def read_config(self):
"""
Reads and preprocesses the pydocmk2 configuration file.
"""
with open(self.config_path) as fp:
self.config = yaml.safe_load(fp)
self.default_config(blank=False)
def default_config(self, blank=False):
if blank:
self.config = {}
if not self.mkdocs_path:
if self.config.get('mkdocs_path', None):
self.mkdocs_path = self.config['mkdocs_path']
else:
self.mkdocs_path = os.path.join(self.config_dir, 'mkdocs.yml')
self.config.setdefault('docs_dir', 'srcdocs')
self.docs_dir = self.config['docs_dir']
if not os.path.isabs(self.docs_dir):
self.docs_dir = os.path.join(self.config_dir, self.docs_dir)
self.config.setdefault('gens_dir', '_build/mkdocs')
self.gens_dir = self.config['gens_dir']
if not os.path.isabs(self.gens_dir):
self.gens_dir = os.path.join(self.config_dir, self.gens_dir)
self.config.setdefault('site_dir', '_build/docs')
self.config.setdefault('pydocmk_pre_dir', None)
self.pre_dir = self.config['pydocmk_pre_dir']
if self.pre_dir:
if not os.path.isabs(self.pre_dir):
self.pre_dir = os.path.join(self.config_dir, self.pre_dir)
self.config.setdefault('pydocmk_post_dir', None)
self.post_dir = self.config['pydocmk_post_dir']
if self.post_dir:
if not os.path.isabs(self.post_dir):
self.post_dir = os.path.join(
self.config_dir, self.post_dir)
self.config.setdefault('headers', 'markdown')
self.config.setdefault('theme', 'readthedocs')
self.config.setdefault('loader', 'pydocmk2.loader.PythonLoader')
self.config.setdefault(
'preprocessor', 'pydocmk2.preprocessors.pydocmk.Preprocessor')
self.config.setdefault('additional_search_paths', [])
self.config.setdefault('markdown_extensions', [
'attr_list', 'def_list', 'tables', 'abbr', 'admonition', 'codehilite', 'pymdownx.extrarawhtml'])
def write_temp_mkdocs_config(self):
"""
Generates a configuration for MkDocs on-the-fly from the pydocmk2
configuration and makes sure it gets removed when this program exits.
"""
ignored_keys = ('gens_dir', 'pages', 'headers', 'generate', 'loader',
'preprocessor', 'additional_search_paths', 'pydocmk_pre_dir', 'pydocmk_post_dir')
self.mk_config = {key: value for key,
value in self.config.items() if key not in ignored_keys}
self.mk_config['docs_dir'] = self.config['gens_dir']
if 'pages' in self.config:
self.mk_config['nav'] = self.config['pages']
with open(self.mkdocs_path, 'w') as fp:
yaml.dump(self.mk_config, fp)
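# Sketch of the generated mkdocs.yml (illustrative values): all non-ignored keys
# from pydocmk.yml are passed through, docs_dir points at the gens_dir and
# `pages` is emitted as `nav`:
#
#   site_name: My Project
#   theme: readthedocs
#   docs_dir: _build/mkdocs
#   nav:
#   - Home: index.md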
def makedirs(self, path):
"""
Create the directory *path* if it does not already exist.
"""
if not os.path.isdir(path):
os.makedirs(path)
# Also process all pages to copy files outside of the docs_dir to the gens_dir.
def process_pages(self, data, gens_dir):
for key in data:
filename = data[key]
if isinstance(filename, str) and '<<' in filename:
filename, source = filename.split('<<')
filename, source = filename.rstrip(), os.path.join(
self.config_dir, source.lstrip())
outfile = os.path.join(gens_dir, filename)
self.makedirs(os.path.dirname(outfile))
shutil.copyfile(source, outfile)
data[key] = filename
elif isinstance(filename, dict):
self.process_pages(filename, gens_dir)
elif isinstance(filename, list):
[self.process_pages(x, gens_dir) for x in filename]
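# Illustrative `pages` entry using the `<< INFILE` syntax handled above
# (file names are hypothetical):
#
#   pages:
#   - Home: index.md << ../README.md
#   - API: api.md
#
# ../README.md is copied into the gens_dir as index.md and the page entry is
# rewritten to plain 'index.md'.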
def clean_files(self):
if self.clean:
if os.path.isfile(self.mkdocs_path):
log('Removing file %s...' % (self.mkdocs_path))
os.remove(self.mkdocs_path)
if os.path.isdir(self.config.get('gens_dir', None)):
log('Cleaning generated folder %s...' % (self.gens_dir))
shutil.rmtree(self.gens_dir)
atexit.register(lambda: os.remove(self.mkdocs_path))
def copy_source_files(self, pages_required=True):
"""
Copies all files from the `docs_dir` to the `gens_dir` defined in the
*config*. It also takes the MkDocs `pages` configuration into account
and converts the special `<< INFILE` syntax by copying them to the
`gens_dir` as well.
"""
for path in self.config['additional_search_paths']:
path = os.path.abspath(path)
sys.path.append(path)
# Copy all template files from the source directory into our
# generated files directory.
log('Started copying source files...')
for root, dirs, files in os.walk(self.docs_dir):
rel_root = os.path.relpath(root, self.docs_dir)
for fname in files:
dest_fname = os.path.join(self.gens_dir, rel_root, fname)
self.makedirs(os.path.dirname(dest_fname))
shutil.copyfile(os.path.join(root, fname), dest_fname)
if 'pages' not in self.config:
if pages_required:
raise RuntimeError('pydocmk.yml does not have defined pages!')
return
for page in self.config['pages']:
self.process_pages(page, self.gens_dir)
def new_project(self):
with open('pydocmk.yml', 'w') as fp:
fp.write(
'site_name: Welcome to pydoc-markdown\ngenerate:\npages:\n- Home: index.md << ../README.md\n')
def main(self):
if self.command == 'new':
self.new_project()
return
if self.command == 'pydoc':
if len(self.subargs):
mod = self.subargs[0]
print(pydoc_html(mod))
return
if self.command not in ('simple', 'pydoc'):
self.read_config()
else:
self.default_config(blank=True)
if self.config_custom:
try:
config_custom = json.loads(self.config_custom)
self.config.update(config_custom)
except ValueError:
pass
loader = import_object(self.config['loader'])(self.config)
preproc = import_object(self.config['preprocessor'])(self.config)
if self.command not in ('simple', 'pydoc'):
self.mkdocs_path = os.path.join(
os.path.dirname(self.config_path), 'mkdocs.yml')
self.clean_files()
mkdocs_exist = os.path.isfile(self.mkdocs_path)
self.copy_source_files(pages_required=not mkdocs_exist)
if not mkdocs_exist:
log('Generating temporary MkDocs config...')
self.write_temp_mkdocs_config()
# Build the index and document structure first, we load the actual
# docstrings at a later point.
log('Building index...')
index = Index()
def add_sections(doc, object_names, depth=1):
if isinstance(object_names, list):
[add_sections(doc, x, depth) for x in object_names]
elif isinstance(object_names, dict):
for key, subsections in object_names.items():
add_sections(doc, key, depth)
add_sections(doc, subsections, depth + 1)
elif isinstance(object_names, str):
# Check how many levels of recursion we should be going.
expand_depth = len(object_names)
object_names = object_names.rstrip('+')
expand_depth -= len(object_names)
def create_sections(name, level):
if level > expand_depth:
return
log("Building %s" % (name))
index.new_section(doc, name, depth=depth + level,
header_type=self.config.get('headers', 'html'), pre_dir=self.pre_dir, post_dir=self.post_dir)
sort_order = self.config.get('sort')
if sort_order not in ('line', 'name'):
sort_order = 'line'
need_docstrings = 'docstring' in self.config.get(
'filter', ['docstring'])
for sub in dir_object(name, sort_order, need_docstrings):
sub = name + '.' + sub
sec = create_sections(sub, level + 1)
create_sections(object_names, 0)
else:
raise RuntimeError(object_names)
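# Example of the expansion syntax handled above (hypothetical module names):
# 'mypackage+' documents the module plus one level of members, 'mypackage++'
# two levels; the trailing '+' characters only set expand_depth and are
# stripped from the import name.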
# Make sure that we can find modules from the current working directory,
# and have them take precedence over installed modules.
sys.path.insert(0, '.')
if self.command == 'simple':
# In simple mode, we generate a single document from the import
# names specified on the command-line.
doc = index.new_document('main.md')
add_sections(doc, self.subargs)
else:
for pages in self.config.get('generate') or []:
for fname, object_names in pages.items():
doc = index.new_document(fname)
add_sections(doc, object_names)
preproc.link_lookup = {}
for file, doc in index.documents.items():
for section in doc.sections:
preproc.link_lookup[section.identifier] = file
# Load the docstrings and fill the sections.
log('Started generating documentation...')
for doc in index.documents.values():
for section in filter(lambda s: s.identifier, doc.sections):
loader.load_section(section)
preproc.preprocess_section(section)
if self.command == 'simple':
for section in doc.sections:
section.render(sys.stdout)
return 0
# Write out all the generated documents.
for fname, doc in index.documents.items():
fname = os.path.join(self.gens_dir, fname)
self.makedirs(os.path.dirname(fname))
with open(fname, 'w') as fp:
log("Writing %s..." % (fname))
for section in doc.sections:
section.render(fp)
if self.command == 'generate':
return 0
log("Running 'mkdocs {}'".format(self.command))
sys.stdout.flush()
mk_args = [self.python_path, '-m', 'mkdocs',
self.command, '-f', self.mkdocs_path] + self.subargs
log(' '.join(mk_args))
try:
return subprocess.call(mk_args)
except KeyboardInterrupt:
return signal.SIGINT
if __name__ == '__main__':
pd = PyDocMk()
pd.parse_args()
| |
"""functions related to modeling"""
import pymel.internal.factories as _factories
import pymel.core.general as _general
if False:
from maya import cmds
else:
import pymel.internal.pmcmds as cmds # type: ignore[no-redef]
def pointPosition(*args, **kwargs):
return _general.datatypes.Point(cmds.pointPosition(*args, **kwargs))
def curve(*args, **kwargs):
"""
Maya Bug Fix:
- name parameter was only applied to the transform; it now applies to the shape as well
"""
# curve returns a transform
name = kwargs.pop('name', kwargs.pop('n', None))
res = _general.PyNode(cmds.curve(*args, **kwargs))
if name:
res.rename(name)
return res
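# Minimal usage sketch (hypothetical CV positions), assuming a running Maya session:
#
#   crv = curve(p=[(0, 0, 0), (3, 5, 6), (5, 6, 7)], degree=3, name='myCurve')
#   # the returned transform (and thus its shape) carries the requested name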
def surface(*args, **kwargs):
"""
Maya Bug Fix:
- name parameter was only applied to the transform; it now applies to the shape as well
"""
# surface returns a shape
name = kwargs.pop('name', kwargs.pop('n', None))
res = _general.PyNode(cmds.surface(*args, **kwargs))
if name:
res.getParent().rename(name)
return res
# ------ Do not edit below this line --------
addMetadata = _factories.getCmdFunc('addMetadata')
@_factories.addCmdDocs
def alignCurve(*args, **kwargs):
res = cmds.alignCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def alignSurface(*args, **kwargs):
res = cmds.alignSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def angleBetween(*args, **kwargs):
res = cmds.angleBetween(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
applyMetadata = _factories.getCmdFunc('applyMetadata')
@_factories.addCmdDocs
def arcLengthDimension(*args, **kwargs):
res = cmds.arcLengthDimension(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def arclen(*args, **kwargs):
res = cmds.arclen(*args, **kwargs)
wraps = _factories.simpleCommandWraps['arclen']
for func, wrapCondition in wraps:
if wrapCondition.eval(kwargs):
res = func(res)
break
return res
arubaNurbsToPoly = _factories.getCmdFunc('arubaNurbsToPoly')
@_factories.addCmdDocs
def attachCurve(*args, **kwargs):
res = cmds.attachCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def attachSurface(*args, **kwargs):
res = cmds.attachSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def bevel(*args, **kwargs):
res = cmds.bevel(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def bevelPlus(*args, **kwargs):
res = cmds.bevelPlus(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
bezierAnchorPreset = _factories.getCmdFunc('bezierAnchorPreset')
bezierAnchorState = _factories.getCmdFunc('bezierAnchorState')
@_factories.addCmdDocs
def bezierCurveToNurbs(*args, **kwargs):
res = cmds.bezierCurveToNurbs(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
bezierInfo = _factories.getCmdFunc('bezierInfo')
blend2 = _factories.getCmdFunc('blend2')
blindDataType = _factories.getCmdFunc('blindDataType')
@_factories.addCmdDocs
def boundary(*args, **kwargs):
res = cmds.boundary(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
canCreateManip = _factories.getCmdFunc('canCreateManip')
changeSubdivComponentDisplayLevel = _factories.getCmdFunc('changeSubdivComponentDisplayLevel')
changeSubdivRegion = _factories.getCmdFunc('changeSubdivRegion')
@_factories.addCmdDocs
def circle(*args, **kwargs):
res = cmds.circle(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
circularFillet = _factories.getCmdFunc('circularFillet')
@_factories.addCmdDocs
def closeCurve(*args, **kwargs):
res = cmds.closeCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def closeSurface(*args, **kwargs):
res = cmds.closeSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
coarsenSubdivSelectionList = _factories.getCmdFunc('coarsenSubdivSelectionList')
@_factories.addCmdDocs
def cone(*args, **kwargs):
res = cmds.cone(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
constructionHistory = _factories.getCmdFunc('constructionHistory')
createSubdivRegion = _factories.getCmdFunc('createSubdivRegion')
_curve = curve
@_factories.addCmdDocs
def curve(*args, **kwargs):
res = _curve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def curveIntersect(*args, **kwargs):
res = cmds.curveIntersect(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
curveOnSurface = _factories.getCmdFunc('curveOnSurface')
@_factories.addCmdDocs
def cylinder(*args, **kwargs):
res = cmds.cylinder(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
dataStructure = _factories.getCmdFunc('dataStructure')
@_factories.addCmdDocs
def detachCurve(*args, **kwargs):
res = cmds.detachCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def detachSurface(*args, **kwargs):
res = cmds.detachSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
doubleProfileBirailSurface = _factories.getCmdFunc('doubleProfileBirailSurface')
duplicateCurve = _factories.getCmdFunc('duplicateCurve')
duplicateSurface = _factories.getCmdFunc('duplicateSurface')
@_factories.addCmdDocs
def editMetadata(*args, **kwargs):
res = cmds.editMetadata(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def extendCurve(*args, **kwargs):
res = cmds.extendCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def extendSurface(*args, **kwargs):
res = cmds.extendSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def extrude(*args, **kwargs):
res = cmds.extrude(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def filletCurve(*args, **kwargs):
res = cmds.filletCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
filterExpand = _factories.getCmdFunc('filterExpand')
@_factories.addCmdDocs
def fitBspline(*args, **kwargs):
res = cmds.fitBspline(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
freeFormFillet = _factories.getCmdFunc('freeFormFillet')
geomToBBox = _factories.getCmdFunc('geomToBBox')
getMetadata = _factories.getCmdFunc('getMetadata')
@_factories.addCmdDocs
def globalStitch(*args, **kwargs):
res = cmds.globalStitch(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def grid(*args, **kwargs):
res = cmds.grid(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
hardenPointCurve = _factories.getCmdFunc('hardenPointCurve')
hasMetadata = _factories.getCmdFunc('hasMetadata')
illustratorCurves = _factories.getCmdFunc('illustratorCurves')
@_factories.addCmdDocs
def insertKnotCurve(*args, **kwargs):
res = cmds.insertKnotCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def insertKnotSurface(*args, **kwargs):
res = cmds.insertKnotSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
intersect = _factories.getCmdFunc('intersect')
@_factories.addCmdDocs
def loft(*args, **kwargs):
res = cmds.loft(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
makeSingleSurface = _factories.getCmdFunc('makeSingleSurface')
manipOptions = _factories.getCmdFunc('manipOptions')
manipPivot = _factories.getCmdFunc('manipPivot')
moveVertexAlongDirection = _factories.getCmdFunc('moveVertexAlongDirection')
multiProfileBirailSurface = _factories.getCmdFunc('multiProfileBirailSurface')
nurbsBoolean = _factories.getCmdFunc('nurbsBoolean')
nurbsCopyUVSet = _factories.getCmdFunc('nurbsCopyUVSet')
@_factories.addCmdDocs
def nurbsCube(*args, **kwargs):
res = cmds.nurbsCube(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def nurbsCurveToBezier(*args, **kwargs):
res = cmds.nurbsCurveToBezier(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
nurbsEditUV = _factories.getCmdFunc('nurbsEditUV')
@_factories.addCmdDocs
def nurbsPlane(*args, **kwargs):
res = cmds.nurbsPlane(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
nurbsSelect = _factories.getCmdFunc('nurbsSelect')
@_factories.addCmdDocs
def nurbsSquare(*args, **kwargs):
res = cmds.nurbsSquare(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
nurbsToPoly = _factories.getCmdFunc('nurbsToPoly')
nurbsToPolygonsPref = _factories.getCmdFunc('nurbsToPolygonsPref')
@_factories.addCmdDocs
def nurbsToSubdiv(*args, **kwargs):
res = cmds.nurbsToSubdiv(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
nurbsToSubdivPref = _factories.getCmdFunc('nurbsToSubdivPref')
nurbsUVSet = _factories.getCmdFunc('nurbsUVSet')
@_factories.addCmdDocs
def offsetCurve(*args, **kwargs):
res = cmds.offsetCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
offsetCurveOnSurface = _factories.getCmdFunc('offsetCurveOnSurface')
@_factories.addCmdDocs
def offsetSurface(*args, **kwargs):
res = cmds.offsetSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
planarSrf = _factories.getCmdFunc('planarSrf')
@_factories.addCmdDocs
def plane(*args, **kwargs):
res = cmds.plane(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
pointCurveConstraint = _factories.getCmdFunc('pointCurveConstraint')
pointOnCurve = _factories.getCmdFunc('pointOnCurve')
pointOnSurface = _factories.getCmdFunc('pointOnSurface')
pointPosition = _factories.addCmdDocs(pointPosition)
@_factories.addCmdDocs
def polyAppend(*args, **kwargs):
res = cmds.polyAppend(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyAppendVertex(*args, **kwargs):
res = cmds.polyAppendVertex(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyAutoProjection = _factories.getCmdFunc('polyAutoProjection')
polyAverageNormal = _factories.getCmdFunc('polyAverageNormal')
@_factories.addCmdDocs
def polyAverageVertex(*args, **kwargs):
res = cmds.polyAverageVertex(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyBevel(*args, **kwargs):
res = cmds.polyBevel(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyBevel3(*args, **kwargs):
res = cmds.polyBevel3(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyBlendColor = _factories.getCmdFunc('polyBlendColor')
@_factories.addCmdDocs
def polyBlindData(*args, **kwargs):
res = cmds.polyBlindData(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyBoolOp(*args, **kwargs):
res = cmds.polyBoolOp(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyBridgeEdge(*args, **kwargs):
res = cmds.polyBridgeEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyCBoolOp(*args, **kwargs):
res = cmds.polyCBoolOp(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyCacheMonitor = _factories.getCmdFunc('polyCacheMonitor')
polyCanBridgeEdge = _factories.getCmdFunc('polyCanBridgeEdge')
polyCheck = _factories.getCmdFunc('polyCheck')
@_factories.addCmdDocs
def polyChipOff(*args, **kwargs):
res = cmds.polyChipOff(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyCircularize(*args, **kwargs):
res = cmds.polyCircularize(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyCircularizeEdge = _factories.getCmdFunc('polyCircularizeEdge')
polyCircularizeFace = _factories.getCmdFunc('polyCircularizeFace')
@_factories.addCmdDocs
def polyClean(*args, **kwargs):
res = cmds.polyClean(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyClipboard = _factories.getCmdFunc('polyClipboard')
@_factories.addCmdDocs
def polyCloseBorder(*args, **kwargs):
res = cmds.polyCloseBorder(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyCollapseEdge(*args, **kwargs):
res = cmds.polyCollapseEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyCollapseFacet = _factories.getCmdFunc('polyCollapseFacet')
polyCollapseTweaks = _factories.getCmdFunc('polyCollapseTweaks')
polyColorBlindData = _factories.getCmdFunc('polyColorBlindData')
@_factories.addCmdDocs
def polyColorDel(*args, **kwargs):
res = cmds.polyColorDel(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyColorMod(*args, **kwargs):
res = cmds.polyColorMod(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyColorPerVertex(*args, **kwargs):
res = cmds.polyColorPerVertex(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyColorSet = _factories.getCmdFunc('polyColorSet')
polyCompare = _factories.getCmdFunc('polyCompare')
@_factories.addCmdDocs
def polyCone(*args, **kwargs):
res = cmds.polyCone(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyConnectComponents(*args, **kwargs):
res = cmds.polyConnectComponents(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyContourProjection = _factories.getCmdFunc('polyContourProjection')
@_factories.addCmdDocs
def polyCopyUV(*args, **kwargs):
res = cmds.polyCopyUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyCrease(*args, **kwargs):
res = cmds.polyCrease(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyCreateFacet = _factories.getCmdFunc('polyCreateFacet')
@_factories.addCmdDocs
def polyCube(*args, **kwargs):
res = cmds.polyCube(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
#!/usr/bin/env python
# encoding: utf-8
"""
towerstruc.py
Created by <NAME> on 2012-01-20.
Copyright (c) NREL. All rights reserved.
HISTORY: 2012 created
-7/2014: R.D. Bugs found in the call to shellBucklingEurocode from towerwithFrame3DD. Fixed.
Also set_as_top added.
-10/2014: R.D. Merged back with some changes Andrew did on his end.
-12/2014: A.N. fixed some errors from the merge (redundant drag calc). pep8 compliance. removed several unneccesary variables and imports (including set_as_top)
- 6/2015: A.N. major rewrite. removed pBEAM. can add spring stiffness anywhere. can add mass anywhere.
can use different material props throughout.
- 7/2015 : R.D. modified to use commonse modules.
- 1/2018 : G.B. modified for easier use with other modules, reducing user input burden, and shifting more to commonse
"""
from __future__ import print_function
import numpy as np
from openmdao.api import Component, Group, Problem, IndepVarComp
from commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag
from commonse.environment import WindBase, WaveBase, LinearWaves, TowerSoil, PowerWind, LogWind
from commonse.tube import CylindricalShellProperties
from commonse.utilities import assembleI, unassembleI, nodal2sectional
from commonse import gravity, eps, NFREQ
from commonse.vertical_cylinder import CylinderDiscretization, CylinderMass, CylinderFrame3DD
#from fusedwind.turbine.tower import TowerFromCSProps
#from fusedwind.interface import implement_base
import commonse.UtilizationSupplement as Util
# -----------------
# Components
# -----------------
class TowerDiscretization(Component):
def __init__(self):
super(TowerDiscretization, self).__init__()
self.add_param('hub_height', val=0.0, units='m', desc='hub height of the wind turbine')
self.add_param('z_end', val=0.0, units='m', desc='Last node point on tower')
self.add_output('height_constraint', val=0.0, units='m', desc='mismatch between tower height and desired hub_height')
def solve_nonlinear(self, params, unknowns, resids):
unknowns['height_constraint'] = params['hub_height'] - params['z_end']
def linearize(self, params, unknowns, resids):
J = {}
J['height_constraint','hub_height'] = 1
J['height_constraint','z_end'] = -1
return J
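# Note on the derivative convention used throughout this file: OpenMDAO 1.x expects
# linearize() to return a dict keyed by (output_name, input_name) pairs. For
# TowerDiscretization the constraint is hub_height - z_end, so the partials are +1
# and -1 respectively. Illustrative numbers (not from any real turbine): with
# hub_height = 90 m and z_end = 88 m, height_constraint = 2 m, which an optimizer
# would be asked to drive to zero.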
class TowerMass(Component):
def __init__(self, nPoints):
super(TowerMass, self).__init__()
self.add_param('cylinder_mass', val=np.zeros(nPoints-1), units='kg', desc='Total cylinder mass')
self.add_param('cylinder_cost', val=0.0, units='USD', desc='Total cylinder cost')
self.add_param('cylinder_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of cylinder')
self.add_param('cylinder_section_center_of_mass', val=np.zeros(nPoints-1), units='m', desc='z position of center of mass of each can in the cylinder')
self.add_param('cylinder_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of cylinder about base [xx yy zz xy xz yz]')
self.add_output('tower_raw_cost', val=0.0, units='USD', desc='Total tower cost')
self.add_output('tower_mass', val=0.0, units='kg', desc='Total tower mass')
self.add_output('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
self.add_output('tower_section_center_of_mass', val=np.zeros(nPoints-1), units='m', desc='z position of center of mass of each can in the tower')
self.add_output('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
def solve_nonlinear(self, params, unknowns, resids):
unknowns['tower_raw_cost'] = params['cylinder_cost']
unknowns['tower_mass'] = params['cylinder_mass'].sum()
unknowns['tower_center_of_mass'] = params['cylinder_center_of_mass']
unknowns['tower_section_center_of_mass'] = params['cylinder_section_center_of_mass']
unknowns['tower_I_base'] = params['cylinder_I_base']
def linearize(self, params, unknowns, resids):
npts = len(params['cylinder_section_center_of_mass'])
zeroPts = np.zeros(npts)
zero6 = np.zeros(6)
J = {}
J['tower_mass','cylinder_mass'] = np.ones(len(params['cylinder_mass']))
J['tower_mass','cylinder_cost'] = 0.0
J['tower_mass','cylinder_center_of_mass'] = 0.0
J['tower_mass','cylinder_section_center_of_mass'] = zeroPts
J['tower_mass','cylinder_I_base'] = zero6
J['tower_raw_cost','cylinder_mass'] = np.zeros(len(params['cylinder_mass']))
J['tower_raw_cost','cylinder_cost'] = 1.0
J['tower_raw_cost','cylinder_center_of_mass'] = 0.0
J['tower_raw_cost','cylinder_section_center_of_mass'] = zeroPts
J['tower_raw_cost','cylinder_I_base'] = zero6
J['tower_center_of_mass','cylinder_mass'] = 0.0
J['tower_center_of_mass','cylinder_cost'] = 0.0
J['tower_center_of_mass','cylinder_center_of_mass'] = 1.0
J['tower_center_of_mass','cylinder_section_center_of_mass'] = zeroPts
J['tower_center_of_mass','cylinder_I_base'] = zero6
J['tower_section_center_of_mass','cylinder_mass'] = 0.0
J['tower_section_center_of_mass','cylinder_cost'] = 0.0
J['tower_section_center_of_mass','cylinder_center_of_mass'] = 0.0
J['tower_section_center_of_mass','cylinder_section_center_of_mass'] = np.eye(npts)
J['tower_section_center_of_mass','cylinder_I_base'] = np.zeros((npts,6))
J['tower_I_base','cylinder_mass'] = 1.0
J['tower_I_base','cylinder_cost'] = 0.0
J['tower_I_base','cylinder_center_of_mass'] = 0.0
J['tower_I_base','cylinder_section_center_of_mass'] = np.zeros((6,npts))
J['tower_I_base','cylinder_I_base'] = np.eye(len(params['cylinder_I_base']))
return J
class TurbineMass(Component):
def __init__(self):
super(TurbineMass, self).__init__()
self.add_param('hubH', val=0.0, units='m', desc='Hub-height')
self.add_param('rna_mass', val=0.0, units='kg', desc='Total mass of the rotor-nacelle assembly (RNA)')
self.add_param('rna_I', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of rna about tower top [xx yy zz xy xz yz]')
self.add_param('rna_cg', np.zeros((3,)), units='m', desc='xyz-location of rna cg relative to tower top')
self.add_param('tower_mass', val=0.0, units='kg', desc='Total tower mass')
self.add_param('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
self.add_param('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
self.add_output('turbine_mass', val=0.0, units='kg', desc='Total mass of tower+rna')
self.add_output('turbine_center_of_mass', val=np.zeros((3,)), units='m', desc='xyz-position of tower+rna center of mass')
self.add_output('turbine_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
# Derivatives
self.deriv_options['type'] = 'fd'
self.deriv_options['form'] = 'central'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['step_size'] = 1e-5
def solve_nonlinear(self, params, unknowns, resids):
unknowns['turbine_mass'] = params['rna_mass'] + params['tower_mass']
cg_rna = params['rna_cg'] + np.array([0.0, 0.0, params['hubH']])
cg_tower = np.array([0.0, 0.0, params['tower_center_of_mass']])
unknowns['turbine_center_of_mass'] = (params['rna_mass']*cg_rna + params['tower_mass']*cg_tower) / unknowns['turbine_mass']
R = cg_rna
I_tower = assembleI(params['tower_I_base'])
I_rna = assembleI(params['rna_I']) + params['rna_mass']*(np.dot(R, R)*np.eye(3) - np.outer(R, R))
unknowns['turbine_I_base'] = unassembleI(I_tower + I_rna)
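# Math note for the block above: the RNA inertia is moved to the tower base with the
# parallel-axis (Steiner) theorem, I_base = I_ref + m * ((R . R) * E3 - R (x) R),
# where R is the offset vector cg_rna used above, E3 the 3x3 identity, and (x) the
# outer product. assembleI/unassembleI convert between the 6-vector form
# [xx yy zz xy xz yz] and the full symmetric 3x3 inertia tensor.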
class TowerPreFrame(Component):
def __init__(self, nFull):
super(TowerPreFrame, self).__init__()
self.add_param('z', np.zeros(nFull), units='m', desc='location along tower. start at bottom and go to top')
# extra mass
self.add_param('mass', 0.0, units='kg', desc='added mass')
self.add_param('mI', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia about some point p [xx yy zz xy xz yz]')
self.add_param('mrho', np.zeros((3,)), units='m', desc='xyz-location of p relative to node')
# point loads
self.add_param('rna_F', np.zeros((3,)), units='N', desc='rna force')
self.add_param('rna_M', np.zeros((3,)), units='N*m', desc='rna moment')
# Monopile handling
self.add_param('k_monopile', np.zeros(6), units='N/m', desc='Stiffness BCs for ocean soil. Only used if the monopile flag input is True')
self.add_param('monopile', False, desc='Flag for monopile BCs', pass_by_obj=True)
# spring reaction data. Use float('inf') for rigid constraints.
nK = 1
self.add_output('kidx', np.zeros(nK, dtype=np.int_), desc='indices of z where external stiffness reactions should be applied.', pass_by_obj=True)
self.add_output('kx', np.zeros(nK), units='m', desc='spring stiffness in x-direction', pass_by_obj=True)
self.add_output('ky', np.zeros(nK), units='m', desc='spring stiffness in y-direction', pass_by_obj=True)
self.add_output('kz', np.zeros(nK), units='m', desc='spring stiffness in z-direction', pass_by_obj=True)
self.add_output('ktx', np.zeros(nK), units='m', desc='spring stiffness in theta_x-rotation', pass_by_obj=True)
self.add_output('kty', np.zeros(nK), units='m', desc='spring stiffness in theta_y-rotation', pass_by_obj=True)
self.add_output('ktz', np.zeros(nK), units='m', desc='spring stiffness in theta_z-rotation', pass_by_obj=True)
# extra mass
nMass = 1
self.add_output('midx', np.zeros(nMass, dtype=np.int_), desc='indices where added mass should be applied.', pass_by_obj=True)
self.add_output('m', np.zeros(nMass), units='kg', desc='added mass')
self.add_output('mIxx', np.zeros(nMass), units='kg*m**2', desc='x mass moment of inertia about some point p')
self.add_output('mIyy', np.zeros(nMass), units='kg*m**2', desc='y mass moment of inertia about some point p')
self.add_output('mIzz', np.zeros(nMass), units='kg*m**2', desc='z mass moment of inertia about some point p')
self.add_output('mIxy', np.zeros(nMass), units='kg*m**2', desc='xy mass moment of inertia about some point p')
self.add_output('mIxz', np.zeros(nMass), units='kg*m**2', desc='xz mass moment of inertia about some point p')
self.add_output('mIyz', np.zeros(nMass), units='kg*m**2', desc='yz mass moment of inertia about some point p')
self.add_output('mrhox', np.zeros(nMass), units='m', desc='x-location of p relative to node')
self.add_output('mrhoy', np.zeros(nMass), units='m', desc='y-location of p relative to node')
self.add_output('mrhoz', np.zeros(nMass), units='m', desc='z-location of p relative to node')
# point loads (if addGravityLoadForExtraMass=True be sure not to double count by adding those force here also)
nPL = 1
self.add_output('plidx', np.zeros(nPL, dtype=np.int_), desc='indices where point loads should be applied.', pass_by_obj=True)
self.add_output('Fx', np.zeros(nPL), units='N', desc='point force in x-direction')
self.add_output('Fy', np.zeros(nPL), units='N', desc='point force in y-direction')
self.add_output('Fz', np.zeros(nPL), units='N', desc='point force in z-direction')
self.add_output('Mxx', np.zeros(nPL), units='N*m', desc='point moment about x-axis')
self.add_output('Myy', np.zeros(nPL), units='N*m', desc='point moment about y-axis')
self.add_output('Mzz', np.zeros(nPL), units='N*m', desc='point moment about z-axis')
def solve_nonlinear(self, params, unknowns, resids):
# Prepare for reactions: rigid at tower base
unknowns['kidx'] = np.array([ 0 ], dtype=np.int_)
if params['monopile']:
kmono = params['k_monopile']
unknowns['kx'] = np.array([ kmono[0] ])
unknowns['ky'] = np.array([ kmono[2] ])
unknowns['kz'] = np.array([ kmono[4] ])
unknowns['ktx'] = np.array([ kmono[1] ])
unknowns['kty'] = np.array([ kmono[3] ])
unknowns['ktz'] = np.array([ kmono[5] ])
else:
unknowns['kx'] = np.array([ np.inf ])
unknowns['ky'] = np.array([ np.inf ])
unknowns['kz'] = np.array([ np.inf ])
unknowns['ktx'] = np.array([ np.inf ])
unknowns['kty'] = np.array([ np.inf ])
unknowns['ktz'] = np.array([ np.inf ])
# Prepare RNA for "extra node mass"
unknowns['midx'] = np.array([ len(params['z'])-1 ], dtype=np.int_)
unknowns['m'] = np.array([ params['mass'] ])
unknowns['mIxx'] = np.array([ params['mI'][0] ])
unknowns['mIyy'] = np.array([ params['mI'][1] ])
unknowns['mIzz'] = np.array([ params['mI'][2] ])
unknowns['mIxy'] = np.array([ params['mI'][3] ])
unknowns['mIxz'] = np.array([ params['mI'][4] ])
unknowns['mIyz'] = np.array([ params['mI'][5] ])
unknowns['mrhox'] = np.array([ params['mrho'][0] ])
unknowns['mrhoy'] = np.array([ params['mrho'][1] ])
unknowns['mrhoz'] = np.array([ params['mrho'][2] ])
# Prepare point forces at RNA node
unknowns['plidx'] = np.array([ len(params['z'])-1 ], dtype=np.int_)
unknowns['Fx'] = np.array([ params['rna_F'][0] ])
unknowns['Fy'] = np.array([ params['rna_F'][1] ])
unknowns['Fz'] = np.array([ params['rna_F'][2] ])
unknowns['Mxx'] = np.array([ params['rna_M'][0] ])
unknowns['Myy'] = np.array([ params['rna_M'][1] ])
unknowns['Mzz'] = np.array([ params['rna_M'][2] ])
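# Note on the packing implied by the indexing above: k_monopile appears to be ordered
# [kx, ktx, ky, kty, kz, ktz], i.e. translational and rotational stiffnesses
# interleaved per axis. For the land-based (non-monopile) case all six base reactions
# are made rigid by passing float('inf') spring stiffnesses, as noted in the comment
# next to the output declarations.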
def list_deriv_vars(self):
inputs = ('mass', 'mI', 'mrho', 'rna_F', 'rna_M')
outputs = ('m', 'mIxx', 'mIyy', 'mIzz', 'mIxy', 'mIxz', 'mIyz', 'Fx', 'Fy', 'Fz', 'Mxx', 'Myy', 'Mzz')
return inputs, outputs
def linearize(self, params, unknowns, resids):
J = {}
inp,out = self.list_deriv_vars()
for o in out:
for i in inp:
J[o,i] = np.zeros( (len(unknowns[o]), len(params[i])) )
J['m','mass'] = 1.0
J['mIxx','mI'] = np.eye(6)[0,:]
J['mIyy','mI'] = np.eye(6)[1,:]
J['mIzz','mI'] = np.eye(6)[2,:]
J['mIxy','mI'] = np.eye(6)[3,:]
J['mIxz','mI'] = np.eye(6)[4,:]
J['mIyz','mI'] = np.eye(6)[5,:]
J['Fx','rna_F'] = np.eye(3)[0,:]
J['Fy','rna_F'] = np.eye(3)[1,:]
J['Fz','rna_F'] = np.eye(3)[2,:]
J['Mxx','rna_M'] = np.eye(3)[0,:]
but the list of '
f'legend_labels is length {len(legend_labels)}.'
)
else:
legend_labels = self.categories
try:
self.ax.legend(
patches, legend_labels, numpoints=1,
**legend_kwargs, **addtl_legend_kwargs
)
except TypeError:
raise ValueError(
f'The plot is in categorical legend mode, implying a '
f'"matplotlib.legend.Legend" legend object. However, "legend_kwarg" '
f'contains unexpected keyword arguments not supported by this legend type.'
f' Are you sure you are not accidentally passing continuous '
f'"matplotlib.colorbar.Colorbar" legend parameters instead?'
f'\n\n'
f'For a '
f'reference on the valid keyword parameters, see the matplotlib '
f'documentation at '
f'https://matplotlib.org/{MATPLOTLIB_DOCS_VERSION}/api/legend_api.html#'
f'matplotlib.legend.Legend . To learn more about the difference '
f'between these two legend modes, refer to the geoplot documentation '
f'at https://residentmario.github.io/geoplot/user_guide/'
f'Customizing_Plots.html#legend .'
)
else: # self.k is None
if len(legend_marker_kwargs) > 0:
raise ValueError(
'"k" is set to "None", implying a colorbar legend, but "legend_kwargs" '
'includes marker parameters that can only be applied to a patch legend. '
'Remove these parameters or convert to a categorical colormap by '
'specifying a "k" value.'
)
if legend_labels is not None or legend_values is not None:
# TODO: implement this feature
raise NotImplementedError(
'"k" is set to "None", implying a colorbar legend, but "legend_labels" '
'and/or "legend_values" are also specified. These parameters do not '
'apply in the case of a colorbar legend and should be removed.'
)
self.cmap.set_array(self.hue)
try:
plt.gcf().colorbar(self.cmap, ax=self.ax, **legend_kwargs)
except TypeError:
raise ValueError(
f'The plot is in continuous legend mode, implying a '
f'"matplotlib.colorbar.Colorbar" legend object. However, "legend_kwarg" '
f'contains unexpected keyword arguments not supported by this legend type.'
f' Are you sure you are not accidentally passing categorical '
f'"matplotlib.legend.Legend" legend parameters instead?'
f'\n\n'
f'For a '
f'reference on the valid keyword parameters, see the matplotlib '
f'documentation at '
f'https://matplotlib.org/{MATPLOTLIB_DOCS_VERSION}/api/colorbar_api.html#'
f'matplotlib.colorbar.Colorbar . To learn more about the difference '
f'between these two legend modes, refer to the geoplot documentation '
f'at https://residentmario.github.io/geoplot/user_guide/'
f'Customizing_Plots.html#legend .'
)
elif legend and legend_var == 'scale':
if legend_values is None:
# If the user doesn't specify their own legend_values, apply a reasonable
# default: a five-point linear array from max to min. The sort order (max to min,
# not min to max) is important because ax.legend, the matplotlib function these
# values are ultimately passed to, sorts the patches in ascending value order
# internally. Even though we pass no patch ordering information to ax.legend,
# it still appears to determine an ordering by inspecting plot artists
# automagically. In the case where there is no colormap, however, the patch order
# we pass is preserved.
#
# The TLDR is that it's possible to control scale legend patch order (and make it
# ascending or descending) in a non-hue plot, in all other cases legend patch order
# is locked to ascending, so for consistency across the API we use ascending order
# in this case as well.
legend_values = np.linspace(
np.max(self.scale), np.min(self.scale), num=5, dtype=self.scale.dtype
)[::-1]
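# Illustrative values (not from the library): np.linspace(10, 2, num=5) gives
# [10, 8, 6, 4, 2]; reversing with [::-1] hands ax.legend an ascending
# [2, 4, 6, 8, 10], which matches the ordering matplotlib itself settles on when a
# colormap is present, per the comment above.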
if legend_labels is None:
# If the user doesn't specify their own legend_labels, apply a reasonable
# default: the 'g' f-string for the given input value.
legend_labels = ['{0:g}'.format(value) for value in legend_values]
# If the user specifies a markerfacecolor explicitly via legend_params, use that.
#
# Otherwise, use an open-circle design when hue is not None, so as not to confuse
# viewers with colors in the scale mapping to values that do not correspond with the
# plot points. But if there is no hue, it's better to have the legend markers be as
# close to the plot markers as possible, so in that case the points are filled-in with
# the corresponding plot color value. This is controlled by self.colors and, in the
# case where hue is None, will be an n-length list of the same color value or name, so
# we can grab that by taking the first element of self.colors.
if 'markerfacecolor' in legend_marker_kwargs:
markerfacecolors = [legend_marker_kwargs['markerfacecolor']] * len(legend_values)
legend_marker_kwargs.pop('markerfacecolor')
elif self.hue is None:
markerfacecolors = [self.colors[0]] * len(legend_values)
else:
markerfacecolors = ['None'] * len(legend_values)
markersizes = [self.dscale(d) * scale_multiplier for d in legend_values]
# If the user provides a markeredgecolor in legend_kwargs, use that. Otherwise, default
# to a steelblue or black markeredgecolor, depending on whether hue is defined.
if 'markeredgecolor' in legend_marker_kwargs:
markeredgecolor = legend_marker_kwargs.pop('markeredgecolor')
elif self.hue is None:
markeredgecolor = 'steelblue'
else:
markeredgecolor = 'black'
# If the user provides a markersize in legend_kwargs, but the legend is in
# scale mode, raise an error, as setting this markersize would invalidate the
# utility of the legend.
if 'markersize' in legend_marker_kwargs:
raise ValueError(
'Cannot set a "markersize" when the "legend_var" is set to "scale". '
'Doing so would remove the scale reference, rendering the legend '
'useless.'
)
marker_kwargs = {
'marker': "o", 'markeredgecolor': markeredgecolor
}
marker_kwargs.update(legend_marker_kwargs)
patches = []
for markerfacecolor, markersize in zip(
markerfacecolors, markersizes
):
patches.append(
mpl.lines.Line2D(
[0], [0], linestyle='None',
markersize=markersize,
markerfacecolor=markerfacecolor,
**marker_kwargs
)
)
if len(patches) != len(legend_labels):
raise ValueError(
f'The list of legend values is length {len(patches)}, but the list of '
f'legend_labels is length {len(legend_labels)}.'
)
self.ax.legend(
patches, legend_labels, numpoints=1, **legend_kwargs, **addtl_legend_kwargs
)
class ClipMixin:
"""
Class container for clip-setter code shared across all plots that support clip.
Note that there are two different routines for clipping a plot:
* Drawing an inverted polyplot as the top layer. Implemented in `paint_clip`. Advantage is
that it is fast, disadvantage is that the resulting plot can't be applied to a webmap.
* Intersecting each geometry with the unary union of the clip geometries. This is a slower
but more broadly compatible process. It's also quite fast if the clip geometry used is
relatively simple, but this requires conscious effort from the user (we can't simplify
automatically unfortunately).
KDEPlot uses the first method because it relies on `seaborn` underneath, and there is no way
to clip an existing axis painter (that I am aware of). All other plots use the second method.
"""
def set_clip(self, gdf):
clip = self.kwargs.pop('clip')
clip = _to_geom_geoseries(gdf, clip, "clip")
if clip is not None:
clip_shp = clip.unary_union
gdf = gdf.assign(
geometry=gdf.geometry.intersection(clip_shp)
)
null_geoms = gdf.geometry.isnull()
# Clipping may result in null geometries. We warn about this here, but it is the
# responsibility of the plot draw procedure to perform the actual plot exclusion.
if null_geoms.any():
warnings.warn(
f'The input data contains {null_geoms.sum()} data points that do not '
f'intersect with "clip". These data points will not appear in the plot.'
)
return gdf
@staticmethod
def _get_clip(extent, clip):
xmin, ymin, xmax, ymax = extent
# We have to add a little bit of padding to the edges of the box, as otherwise the edges
# will invert a little, surprisingly.
rect = shapely.geometry.Polygon(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)]
)
rect = shapely.affinity.scale(rect, xfact=1.25, yfact=1.25)
for geom in clip:
rect = rect.symmetric_difference(geom)
return rect
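# Sketch of the inversion trick above, with hypothetical geometries: the padded
# bounding rectangle has each clip polygon "punched out" of it via
# symmetric_difference, leaving only the area outside the clip. Painting that
# remainder white on top of the axes visually clips the layer. For example:
#   rect = shapely.geometry.box(0, 0, 10, 10)
#   hole = shapely.geometry.Point(5, 5).buffer(2)
#   inverse = rect.symmetric_difference(hole)
#   inverse.contains(shapely.geometry.Point(5, 5))  # False: the clip interior is gone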
def paint_clip(self):
clip = self.kwargs.pop('clip')
clip = _to_geom_geoseries(self.df, clip, "clip")
if clip is not None:
if self.projection is not None:
xmin, xmax, ymin, ymax = self.ax.get_extent(crs=ccrs.PlateCarree())
extent = (xmin, ymin, xmax, ymax)
clip_geom = self._get_clip(extent, clip)
feature = ShapelyFeature([clip_geom], ccrs.PlateCarree())
self.ax.add_feature(feature, facecolor=(1, 1, 1), linewidth=0, zorder=2)
else:
xmin, xmax = self.ax.get_xlim()
ymin, ymax = self.ax.get_ylim()
extent = (xmin, ymin, xmax, ymax)
clip_geom = self._get_clip(extent, clip)
xmin, xmax = self.ax.get_xlim()
ymin, ymax = self.ax.get_ylim()
polyplot(
gpd.GeoSeries(clip_geom), facecolor='white', linewidth=0, zorder=2,
extent=extent, ax=self.ax
)
class QuadtreeComputeMixin:
"""
Class container for computing a quadtree.
"""
def compute_quadtree(self):
nmin = self.kwargs.pop('nmin')
nmax = self.kwargs.pop('nmax')
hue = self.kwargs.get('hue', None)
df = gpd.GeoDataFrame(self.df, geometry=self.df.geometry)
hue = _to_geoseries(df, hue, "hue")
if hue is not None:
# TODO: what happens in the case of a column name collision?
df = df.assign(hue_col=hue)
# set reasonable defaults for the n-params
nmax = nmax if nmax else len(df)
nmin = nmin if nmin else np.max([1, np.round(len(df) / 100)]).astype(int)
# Jitter the points. Otherwise | |
"""
This module generates Inverse Discrete Hartley Transform matrix (IDHT). |br|
Frequencies represented by the rows of the generated IDHT matrix:
freq. (cos) (sin)
^ /. /.
| / . / .
| / . / .
| / . / .
| / . / .
| / . / .
| / . / .
| / . / .
|/ . / .
|1-------N----------2N---> indices of columns
.
N + 1
where N is the number of tones in the dictionary.
OR:
if the **bFreqSym** [frequency symmetrical] flag is set, then the frequencies
are organized like this:
freq. (cos) (sin)
^ /. \
| / . \
| / . \
| / . \
| / . \
| / . \
| / . \
| / . \
|/ . \
|1-------N----------2N---> indices of columns
.
N + 1
(the **bFreqSym** flag was added in v2.1, 14 January 2016).
*Examples*:
Please go to the *examples/dictionaries* directory for examples on how to
use the dictionary generator. |br|
*Settings*:
Parameters of the generator are described below.
Take a look on '__parametersDefine' function for more info on the
parameters.
Parameters of the dictionary generator are attributes of the class which
must/can be set before the generator is run.
Required parameters:
- a. **tS** (*float*): time of input signals
- b. **fR** (*float*): input signals' representation sampling frequency
- c. **fDelta** (*float*): the frequency separation between tones
- d. **nTones** (*float*): the number of tones in the dictionary
Optional parameters:
- e. **tStart** (*float*): the time shift of starting time point [default = 0]
- f. **fFirst** (*float*): the first frequency in the spectrum [default = fDelta]
- g. **bMute** (*int*): mute the console output from the sampler [default = 0]
*Output*:
Description of the dictionary generator output is below.
This is the list of attributes of the generator class which are available
after calling the 'run' method:
- a. **mDict** (*Numpy array 2D*): the generated dictionary, one tone in a row
- b. **vT** (*Numpy array 1D*): time vector for the dictionary
- c. **vF** (*Numpy array 1D*): frequency vector for the dictionary
Additional parameters of the generated dictionary:
- d. **Tg** (*float*): dictionary time representation period
- e. **nSamp** (*int*): the number of time representation samples
- f. **bFreqSym** (*int*): symmetrical/non-symmetrical frequency distribution flag
*Author*:
<NAME>, Aalborg University, Denmark. <<EMAIL>>
*Version*:
1.0 | 13-JAN-2015 : * Initial version. |br|
1.0r1 | 15-JAN-2015 : * Improvements in code comments |br|
2.0 | 20-AUG-2015 : * Version 2.0 released |br|
2.0r1 | 25-AUG-2015 : * Improvements in code comments and in headers |br|
2.1 | 14-JAN-2016 : * Frequencies of tones may be organized symmetrically |br|
2.1r1 | 15-JAN-2016 : * Bug in entering the silent mode is repaired |br|
2.2 | 18-JAN-2016 : * Function 'freqRange' which gives indices of columns corresponding to a given frequency
range is added |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import rxcs
import numpy as np
class IDHT(rxcs._RxCSobject):
def __init__(self, *args):
rxcs._RxCSobject.__init__(self) # Make it a RxCS object
self.strRxCSgroup = 'Dictionary generator' # Name of group of RxCS modules
self.strModuleName = 'IDHT' # Module name
self.__parametersDefine() # Define the parameters
# Define parameters
def __parametersDefine(self):
# Time of the signal [s]
self.paramAddMan('tS', 'Time of the signal', unit='s')
self.paramType('tS', (int, float))
self.paramH('tS', 0)
self.paramL('tS', np.inf)
# The dictionary representation sampling frequency [Hz]
self.paramAddMan('fR', 'The dictionary representation sampling frequency', unit='Hz')
self.paramType('fR', (int, float))
self.paramH('fR', 0)
self.paramL('fR', np.inf)
# The optional time shift of starting time point
self.paramAddOpt('tStart', 'The time shift of starting time point', unit='s', default=0)
self.paramType('tStart', (int, float))
self.paramH('tStart', -np.inf)
self.paramL('tStart', np.inf)
# The frequency separation between tones [Hz]
self.paramAddMan('fDelta', 'The frequency separation between tones', unit='Hz')
self.paramType('fDelta', (int, float))
self.paramH('fDelta', 0)
self.paramL('fDelta', np.inf)
# The number of tones
self.paramAddMan('nTones', 'The number of tones')
self.paramType('nTones', int)
self.paramH('nTones', 0)
self.paramL('nTones', np.inf)
# The first frequency in the spectrum
self.paramAddOpt('fFirst', 'The first frequency in the spectrum', unit='Hz', default='$$fDelta')
self.paramType('fFirst', (int, float))
self.paramH('fFirst', 0)
self.paramL('fFirst', np.inf)
# The 'symmetrical frequency distribution' flag
self.paramAddOpt('bFreqSym', 'Symmetrical frequency distribution', default=0)
self.paramType('bFreqSym', (int))
self.paramAllowed('bFreqSym',[0, 1]) # It can be either 1 or 0
# 'Mute the output' flag
self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
self.paramType('bMute', int) # Must be of int type
self.paramAllowed('bMute',[0, 1]) # It can be either 1 or 0
# Run
def run(self):
self.parametersCheck() # Check if all the needed parameters are in place and are correct
self.parametersPrint() # Print the values of parameters
self.__engine() # Run the engine
return self.__dict__ # Return dictionary with the parameters
# Engine of the function
def __engine(self):
# Check of the configuration make sense
self._checkConf()
# Compute time and frequency parameters of dictionaries
(self.Tg, self.nSamp, self.tEnd) = self._computeParamT(self.tS, self.fR, self.tStart)
(self.fFirstHigh, self.fHigh) = self._computeParamF(self.fDelta, self.nTones, self.fFirst)
# Print some additional time and frequency parameters of the dictionary
self._printExtraParam()
self.engineStartsInfo() # Info that the engine starts
self.vF = self._generateFVector(self.fFirstHigh, self.fDelta, self.nTones) # Frequency vector
self.vT = self._generateTVector(self.Tg, self.nSamp, self.tStart) # Time vector
(self.mDict, self.vF) = self._generateIDHT(self.vT, self.vF) # The dicionary matrix
self.engineStopsInfo() # Info that the engine ends
return
# Check configuration
def _checkConf(self):
"""
This function checks if the configuration for the generator is correct
"""
# Check if the first frequency in the spectrum is compatible with the
# frequency separation between tones
nTonesStart = self.fFirst / self.fDelta
if not self.isequal(nTonesStart, np.round(nTonesStart), 1e-6):
strE = 'The first frequency in the spectrum (fFirst) is '
strE = strE + 'incompatible with the frequency separation between tones (fDelta)!'
raise ValueError(strE)
# Check if the time represented by dictionary is compatible
# with the representation sampling period
nSmp = self.tS * self.fR # The number of signal samples
if not self.isequal(nSmp, np.round(nSmp), 1e-6):
strE = 'Time represented by dictionary (tS) is incompatible with '
strE = strE + 'the dictionary representation sampling frequency (fR)!'
raise ValueError(strE)
# Check if the optional time shift of starting time point is compatible
# with the representation sampling period
nSmptStart = self.tStart * self.fR
if not self.isequal(nSmptStart, np.round(nSmptStart), 1e-6):
strE = 'Time shift of starting time point (tStart) is incompatible with '
strE = strE + 'the dictionary representation sampling frequency (fR)!'
raise ValueError(strE)
# Check Nyquist
fMax = self.fFirst + self.fDelta * (self.nTones - 1)
if not (self.fR > 2 * fMax):
strW = 'WARNING! The representation sampling frequency (fR) is too low! '
strW = strW + '(Ever heard about the Nyquist principle?)'
rxcs.console.newline()
rxcs.console.warning(strW)
# -----------------------------------------------------------------
return
# Compute time parameters of dictionary
def _computeParamT(self, tS, fR, tStart):
"""
This function computes additional time parameters of the dictionary.
Args:
tS (float): time of input signals
fR (float): input signals' representation sampling frequency
tStart (float): the time shift of starting time point
Returns:
Tg (float): dictionary time representation period
nSamp (int): the number of time representation samples
tEnd (float): dictionary representation time end
"""
# The signal representation period
Tg = 1/fR
# The number of signal samples
nSamp = int(np.round(tS / Tg))
# Signal time end
tEnd = tStart + tS
return (Tg, nSamp, tEnd)
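# Illustrative numbers: tS = 0.1 s and fR = 1 kHz give Tg = 1 ms and
# nSamp = round(0.1 / 0.001) = 100 samples; with tStart = 0.02 s the represented
# interval ends at tEnd = 0.12 s.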
# Compute frequency parameters of dictionaries
def _computeParamF(self, fDelta, nTones, fFirst):
"""
This function computes additional frequency parameters of the dictionary.
Args:
fDelta (float): the frequency separation between tones
nTones (int): the number of tones in the dictionary
fFirst (float): the first frequency in the spectrum
Returns:
fFirstHigh (float): the positive low frequency limit of the dictionary
fHigh (float): the positive high frequency limit of the dictionary
"""
# The positive low frequency limit of the dictionary
fFirstHigh = np.floor(fFirst/fDelta) * fDelta
# The positive high frequency limit of the | |
import argparse
import random
import os
import time
import datetime
import itertools
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
from torchvision import transforms, utils
from model import Generator, Discriminator
from dataset import FFHQ_Dataset
from distributed import (
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from Util.network_util import Build_Generator_From_Dict, Get_Network_Shape
from Util.pruning_util import Get_Network_Score_List, Get_Uniform_RmveList, Generate_Prune_Mask_List
from Util.mask_util import Mask_the_Generator
from Util.Calculators import Styled_Conv_FLOPCal, GENERATOR_FLOPS_256PX
from fid import Get_Model_FID_Score
import lpips
from Util.GAN_Slimming_Util import perceptual_loss, VGGFeature
# Hyper-parameters for training!
import train_sparsity_hyperparams
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default=train_sparsity_hyperparams.data_folder)
parser.add_argument('--size', type=int, default=train_sparsity_hyperparams.generated_img_size)
parser.add_argument('--channel_multiplier', type=int, default=train_sparsity_hyperparams.channel_multiplier)
parser.add_argument('--latent', type=int, default=train_sparsity_hyperparams.latent)
parser.add_argument('--n_mlp', type=int, default=train_sparsity_hyperparams.n_mlp)
parser.add_argument('--ckpt', type=str, default=train_sparsity_hyperparams.ckpt)
parser.add_argument('--load_train_state', type=bool, default=train_sparsity_hyperparams.load_train_state)
parser.add_argument('--iter', type=int, default=train_sparsity_hyperparams.training_iters)
parser.add_argument('--batch', type=int, default=train_sparsity_hyperparams.batch_size)
parser.add_argument('--lr', type=float, default=train_sparsity_hyperparams.init_lr)
parser.add_argument('--r1', type=float, default=train_sparsity_hyperparams.discriminator_r1)
parser.add_argument('--path_regularize', type=float, default=train_sparsity_hyperparams.generator_path_reg_weight)
parser.add_argument('--path_batch_shrink', type=int, default=train_sparsity_hyperparams.path_reg_batch_shrink)
parser.add_argument('--d_reg_every', type=int, default=train_sparsity_hyperparams.d_reg_freq)
parser.add_argument('--g_reg_every', type=int, default=train_sparsity_hyperparams.g_reg_freq)
parser.add_argument('--mixing', type=float, default=train_sparsity_hyperparams.noise_mixing)
parser.add_argument('--sparsity_eta', type=float, default=train_sparsity_hyperparams.sparsity_eta)
parser.add_argument('--init_step', type=float, default=train_sparsity_hyperparams.init_step)
parser.add_argument('--model_prune_freq', type=float, default=train_sparsity_hyperparams.model_prune_freq)
parser.add_argument('--lay_rmve_ratio', type=float, default=train_sparsity_hyperparams.lay_rmve_ratio)
parser.add_argument('--num_rmve_channel', type=float, default=train_sparsity_hyperparams.num_rmve_channel)
parser.add_argument('--prune_metric', type=str, default=train_sparsity_hyperparams.prune_metric)
parser.add_argument('--pruning_mode', type=str, default=train_sparsity_hyperparams.pruning_mode)
parser.add_argument('--n_sample', type=int, default=train_sparsity_hyperparams.val_sample_num)
parser.add_argument('--val_sample_freq', type=int, default=train_sparsity_hyperparams.val_sample_freq)
parser.add_argument('--model_save_freq', type=int, default=train_sparsity_hyperparams.model_save_freq)
parser.add_argument('--fid_n_sample', type=int, default=train_sparsity_hyperparams.fid_n_sample)
parser.add_argument('--fid_batch', type=int, default=train_sparsity_hyperparams.fid_batch)
parser.add_argument('--teacher_ckpt', type=str, default=train_sparsity_hyperparams.teacher)
parser.add_argument('--kd_l1_lambda', type=float, default=train_sparsity_hyperparams.kd_l1_lambda)
parser.add_argument('--kd_percept_lambda', type=float, default=train_sparsity_hyperparams.kd_percept_lambda)
parser.add_argument('--kd_l1_mode', type=str, default=train_sparsity_hyperparams.kd_l1_mode)
parser.add_argument('--kd_percept_mode', type=str, default=train_sparsity_hyperparams.kd_percept_mode)
args = parser.parse_args()
n_gpu = len(train_sparsity_hyperparams.gpu_device_ids)
device = train_sparsity_hyperparams.primary_device
args.distributed = n_gpu > 1
# ======================================= Define the Util for Training Setup =======================================
def Print_Experiment_Status(exp_log_file):
'''
Usage:
To print out all the relevant status of the experiment
'''
experiment_status_str = '\n' + '--------------- Training Start ---------------' + '\n\n'
experiment_status_str += 'Params: ' + '\n\n' + \
' Model and Data: ' + '\n' + \
' Data Folder: ' + str(args.path) + '\n' + \
' Multi-Layer Perceptron Num Layers: ' + str(args.n_mlp) + '\n' + \
' Latent Variable Dimension: ' + str(args.latent) + '\n' + \
' Generated Image Size: ' + str(args.size) + '\n' + \
' Channel Multiplier: ' + str(args.channel_multiplier) + '\n' + \
' Initial Checkpoint: ' + str(args.ckpt) + '\n' + \
' Load Training State: ' + str(args.load_train_state) + '\n\n' + \
' GPU Setup: ' + '\n' + \
' Distributed Training: ' + str(args.distributed) + '\n' + \
' Primiary GPU Device: ' + device + '\n' + \
' GPU Device IDs: ' + str(train_sparsity_hyperparams.gpu_device_ids) + '\n' + \
' Number of GPUs: ' + str(n_gpu) + '\n\n' + \
' Training Params: ' + '\n' + \
' Training Iterations: ' + str(args.iter) + '\n' + \
' Batch Size: ' + str(args.batch) + '\n' + \
' Learning Rate: ' + str(args.lr) + '\n' + \
' Generator Path Regularization Frequency: ' + str(args.g_reg_every) + '\n' + \
' Path Regularization Weight: ' + str(args.path_regularize) + '\n' + \
' Path Batch Shrink Ratio: ' + str(args.path_batch_shrink) + '\n' + \
' Discriminator Regularization Frequency: ' + str(args.d_reg_every) + '\n' + \
' Discriminator Regularization Weight: ' + str(args.r1) + '\n' + \
' Noise Mixing: ' + str(args.mixing) + '\n\n' + \
' Sparsity Params: ' + '\n' + \
' Eta: ' + str(args.sparsity_eta) + '\n' + \
' Init_Step: ' + str(args.init_step) + '\n' + \
' Pruning Metric: ' + str(args.prune_metric) + '\n' + \
' Pruning Mode: ' + str(args.pruning_mode) + '\n' + \
' Global Remove Channel Number: ' + str(args.num_rmve_channel) + '\n' + \
' Layer Remove Ratio: ' + str(args.lay_rmve_ratio) + '\n' + \
' Model Prune Freqeuncy: ' + str(args.model_prune_freq) + '\n\n' + \
' Validation Params: ' + '\n' + \
' Number of Validated Samples: ' + str(args.n_sample) + '\n' + \
' Generate Sample Frequency: ' + str(args.val_sample_freq) + '\n' + \
' Model Saving Frequency: ' + str(args.model_save_freq) + '\n' + \
' FID Sample Num: ' + str(args.fid_n_sample) + '\n' + \
' FID Sample Batch Size: ' + str(args.fid_batch) + '\n\n'
if args.teacher_ckpt is not None:
experiment_status_str += ' Knowledge Distillation Params: ' + '\n' + \
' Teacher Checkpoint: ' + str(args.teacher_ckpt) + '\n' + \
' L1 Knowledge Distillation Weight: ' + str(args.kd_l1_lambda) + '\n' + \
' L1 Knowledge Distillation Mode: ' + str(args.kd_l1_mode) + '\n' + \
' Percept Knowledge Distilation Weight: ' + str(args.kd_percept_lambda) + '\n' + \
' Percept Knowledge Distilation Mode: ' + str(args.kd_percept_mode) + '\n\n'
else:
experiment_status_str += ' No Knowledge Distillation' + '\n\n'
print(experiment_status_str)
exp_log_file.write(experiment_status_str)
def Adjust_Initial_Num_Training_Step(adam_opt, step):
'''
Usage:
To adjust the initial training step counter of the Adam optimizer adam_opt,
which avoids overly large updates (escaping the current minimum) in the initial steps
'''
opt_dict = adam_opt.state_dict()
for param in opt_dict['param_groups'][0]['params']:
step_dict = {'step': step, 'exp_avg': torch.zeros(1), 'exp_avg_sq': torch.tensor(1)}
opt_dict['state'][param] = step_dict
adam_opt.load_state_dict(opt_dict)
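# Why this helps (assuming PyTorch's Adam state layout of 'step', 'exp_avg',
# 'exp_avg_sq'): with a fresh state, Adam's bias correction makes the very first
# update roughly lr * sign(grad), i.e. full-sized, no matter how small the gradient
# is. Pre-setting a large 'step' makes the bias-correction factors ~1 while 'exp_avg'
# starts at zero and 'exp_avg_sq' at one, so the first updates after (re)loading are
# heavily damped and the loaded/pruned weights are not immediately disturbed.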
def Get_Readable_Cur_Time():
return datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(1 - decay, par2[k].data)
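# accumulate() keeps an exponential moving average of the generator weights
# (par1 <- decay * par1 + (1 - decay) * par2), the usual g_ema bookkeeping in
# StyleGAN2 training; the EMA copy is typically the one used for evaluation/FID.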
def sample_data(loader):
while True:
for batch in loader:
yield batch
def Set_G_D_Optim(generator, discriminator, args):
'''
Usage:
Setup the optimizer for generator and discriminator
'''
g_optim = optim.Adam(
generator.parameters(),
lr=args.lr * args.g_reg_ratio,
betas=(0 ** args.g_reg_ratio, 0.99 ** args.g_reg_ratio),
)
d_optim = optim.Adam(
discriminator.parameters(),
lr=args.lr * args.d_reg_ratio,
betas=(0 ** args.d_reg_ratio, 0.99 ** args.d_reg_ratio),
)
return g_optim, d_optim
# ======================================= Define the Training Loss =======================================
def d_logistic_loss(real_pred, fake_pred):
real_loss = F.softplus(-real_pred)
fake_loss = F.softplus(fake_pred)
return real_loss.mean() + fake_loss.mean()
def d_r1_loss(real_pred, real_img):
grad_real, = autograd.grad(
outputs=real_pred.sum(), inputs=real_img, create_graph=True
)
grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
return grad_penalty
def g_nonsaturating_loss(fake_pred):
loss = F.softplus(-fake_pred).mean()
return loss
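# Math note: softplus(-x) = log(1 + exp(-x)) = -log(sigmoid(x)), so this is the
# non-saturating generator loss -E[log D(G(z))] with D interpreted as a logit.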
def KD_loss(args, teacher_g, noise, fake_img, fake_img_list, percept_loss):
'''
Usage:
Define the l1 knowledge distillation loss + LPIPS loss
'''
fake_img_teacher_list = teacher_g(noise, return_rgb_list=True)
fake_img_teacher = fake_img_teacher_list[-1]
fake_img_teacher.requires_grad = True
# kd_l1_loss
if args.kd_l1_mode == 'Output_Only':
kd_l1_loss = args.kd_l1_lambda * torch.mean(torch.abs(fake_img_teacher - fake_img))
elif args.kd_l1_mode == 'Intermediate':
for fake_img_teacher in fake_img_teacher_list:
fake_img_teacher.requires_grad = True
loss_list = [torch.mean(torch.abs(fake_img_teacher - fake_img)) for (fake_img_teacher, fake_img) in zip(fake_img_teacher_list, fake_img_list)]
kd_l1_loss = args.kd_l1_lambda * sum(loss_list)
# kd_percept_loss
if args.size > train_sparsity_hyperparams.PERCEPT_LOSS_IMAGE_SIZE: # pool the image for LPIPS to save memory
pooled_kernel_size = args.size // train_sparsity_hyperparams.PERCEPT_LOSS_IMAGE_SIZE
fake_img = F.avg_pool2d(fake_img, kernel_size = pooled_kernel_size, stride = pooled_kernel_size)
fake_img_teacher = F.avg_pool2d(fake_img_teacher, kernel_size = pooled_kernel_size, stride = pooled_kernel_size)
if args.kd_percept_mode == 'LPIPS':
kd_percept_loss = args.kd_percept_lambda * torch.mean(percept_loss(fake_img, fake_img_teacher))
elif args.kd_percept_mode == 'VGG':
student_output_vgg_features = percept_loss(fake_img)
teacher_output_vgg_features = percept_loss(fake_img_teacher)
kd_percept_loss = args.kd_percept_lambda * perceptual_loss(student_output_vgg_features, teacher_output_vgg_features)[0]
return kd_l1_loss, kd_percept_loss
def L1_Style_Sparse_loss(args, style_list):
'''
Usage:
Define the l1 sparsity loss for styles
'''
sparse_loss_list = []
for style in style_list:
style_mean = torch.mean(style.squeeze(), axis = 0)
l1_sparse_loss = torch.sum(torch.abs(style_mean))
sparse_loss_list.append(l1_sparse_loss)
sparse_loss = args.sparsity_eta * sum(sparse_loss_list)
return sparse_loss
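# Rationale (as implied by the pruning utilities imported above): penalizing the L1
# norm of the per-channel mean style scalar pushes whole channels' modulation toward
# zero; channels whose styles stay near zero contribute little to the output and are
# the natural candidates for the channel-removal step performed every
# model_prune_freq iterations.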
# ======================================= Define the Training Sub-Procedure =======================================
def make_noise(batch, latent_dim, n_noise, device):
if n_noise == 1:
return torch.randn(batch, latent_dim, device=device)
noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)
return noises
def mixing_noise(batch, latent_dim, prob, device):
if prob > 0 and random.random() < prob:
return make_noise(batch, latent_dim, 2, device)
else:
return [make_noise(batch, latent_dim, 1, device)]
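# Illustrative behaviour: mixing_noise(4, 512, 0.9, device) returns two (4, 512)
# latent tensors roughly 90% of the time (style-mixing regularization) and a
# one-element list with a single (4, 512) tensor otherwise.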
def D_Loss_BackProp(generator, discriminator, real_img, args, device, loss_dict, d_optim):
'''
Usage:
To update the discriminator based on the GAN loss
'''
requires_grad(generator, False)
requires_grad(discriminator, True)
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
fake_img = generator(noise)
fake_pred = discriminator(fake_img)
real_pred = discriminator(real_img)
d_loss = d_logistic_loss(real_pred, fake_pred)
loss_dict['d'] = d_loss
loss_dict['real_score'] = real_pred.mean()
loss_dict['fake_score'] = fake_pred.mean()
discriminator.zero_grad()
d_loss.backward()
d_optim.step()
def D_Reg_BackProp(real_img, discriminator, args, d_optim):
'''
Usage:
To update the discriminator based on the regularization
'''
real_img.requires_grad = True
real_pred = discriminator(real_img)
r1_loss = d_r1_loss(real_pred, real_img)
discriminator.zero_grad()
(args.r1 / 2 * r1_loss * args.d_reg_every + 0 * real_pred[0]).backward()
d_optim.step()
return r1_loss
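# The term above implements the (lazy) R1 penalty, gamma/2 * E[ ||grad_x D(x)||^2 ]
# on real images; multiplying by d_reg_every compensates for only applying it every
# d_reg_every iterations, and the "+ 0 * real_pred[0]" keeps the prediction in the
# autograd graph (the usual trick in the reference StyleGAN2 code) so distributed
# training does not complain about unused outputs.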
def G_Loss_BackProp(generator, discriminator, args, device, loss_dict, g_optim, teacher_g, percept_loss):
'''
Usage:
To update the generator based on the GAN loss and KD loss
'''
requires_grad(generator, True)
requires_grad(discriminator, False)
# GAN Loss
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
fake_img_list, style_list = generator(noise, return_rgb_list=True, return_style_scalars=True)
fake_img = fake_img_list[-1]
fake_pred = discriminator(fake_img)
g_loss = g_nonsaturating_loss(fake_pred)
loss_dict['g'] = g_loss
# L1 Sparsity Penalty on Styles
sparse_loss = L1_Style_Sparse_loss(args, style_list)
loss_dict['sparse'] = sparse_loss
total_loss = g_loss + sparse_loss
# KD Loss
if teacher_g is not None:
kd_l1_loss, kd_percept_loss = KD_loss(args, teacher_g, noise, fake_img, fake_img_list, percept_loss)
loss_dict['kd_l1_loss'] = kd_l1_loss
loss_dict['kd_percept_loss'] = kd_percept_loss
total_loss = g_loss + sparse_loss + kd_l1_loss + kd_percept_loss
generator.zero_grad()
total_loss.backward()
g_optim.step()
def G_Reg_BackProp(generator, args, mean_path_length, g_optim):
'''
Usage:
To update the generator based on the regularization
'''
path_batch_size = max(1, args.batch // args.path_batch_shrink)
noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)
fake_img, path_lengths = | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for the Bring Your Own Datatype framework.
TODO(@gussmith23 @hypercubestart) link to documentation"""
import tvm
import tvm.topi.testing
import numpy as np
import pytest
from numpy.random import MT19937, RandomState, SeedSequence
from tvm import relay
from tvm.relay.testing.layers import batch_norm_infer
from tvm.target.datatype import (
register,
register_min_func,
register_op,
create_lower_func,
lower_ite,
lower_call_pure_extern,
create_min_lower_func,
)
from tvm.tir.op import call_pure_extern
# note: we can't use relay.testing models because params are randomly initialized,
# which leads the output to have the same values
# get mobilenet model from Gluon CV
# because: https://discuss.tvm.apache.org/t/mobilenet-intermediate-values-are-0/7812
def get_mobilenet():
dshape = (1, 3, 224, 224)
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("mobilenet0.25", pretrained=True)
shape_dict = {"data": dshape}
return relay.frontend.from_mxnet(block, shape_dict)
# use real image instead of random data for end-to-end model training
# or else output would all be around the same value
def get_cat_image(dimensions):
from tvm.contrib.download import download_testdata
from PIL import Image
url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png"
dst = "cat.png"
real_dst = download_testdata(url, dst, module="data")
img = Image.open(real_dst).resize(dimensions)
# CoreML's standard model image format is BGR
img_bgr = np.array(img)[:, :, ::-1]
img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
return np.asarray(img, dtype="float32")
# we use a random seed to generate input_data
# to guarantee stable tests
rs = RandomState(MT19937(SeedSequence(123456789)))
def convert_ndarray(dst_dtype, array):
"""Converts NDArray(s) into the specified datatype"""
x = relay.var("x", shape=array.shape, dtype=str(array.dtype))
cast = relay.Function([x], x.astype(dst_dtype))
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
return relay.create_executor("graph").evaluate(cast)(array)
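# Illustrative round-trip, assuming TVM's "custom[<name>]<bits>" dtype string
# convention and that setup_myfloat() has already registered the type:
#   x = np.random.rand(3).astype("float32")
#   x_my = convert_ndarray("custom[myfloat]32", x)   # float32 -> myfloat
#   x_back = convert_ndarray("float32", x_my)        # myfloat -> float32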
def change_dtype(src, dst, module, params):
"""Convert constants and functions in module from src type to dst type.
Returns changed module and converted params of type dst_type.
"""
module = relay.frontend.ChangeDatatype(src, dst)(module)
module = relay.transform.InferType()(module)
params = {k: convert_ndarray(dst, v) for k, v in params.items()}
return module, params
def compare(module, input, src_dtype, dst_dtype, rtol, atol, params={}, target="llvm"):
module = relay.transform.InferType()(module)
module = relay.transform.SimplifyInference()(module)
ex = relay.create_executor("graph", mod=module)
correct = ex.evaluate()(*input, **params)
module, converted_params = change_dtype(src_dtype, dst_dtype, module, params)
ex = relay.create_executor("graph", mod=module, target=target)
# converts all inputs to dst_dtype
x_converted = [convert_ndarray(dst_dtype, arr) for arr in input]
# Vectorization is not implemented with custom datatypes
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
maybe_correct = ex.evaluate()(*x_converted, **converted_params)
# currently this only works for comparing single output
maybe_correct_converted = convert_ndarray(src_dtype, maybe_correct)
np.testing.assert_allclose(
maybe_correct_converted.asnumpy(), correct.asnumpy(), rtol=rtol, atol=atol
)
def setup_myfloat():
"""Set up tests for myfloat (a custom datatype that under the hood is float)
Currently, this registers some custom datatypes using the Bring Your
Own Datatypes framework.
"""
# To use datatype operations in an external library, you should first load
# the library containing the datatype implementation:
# CDLL("libposit.so", RTLD_GLOBAL)
# In this case, the datatype library we are using is built right into TVM,
# so we do not need to explicitly load any library.
# You can pick a code for your datatype arbitrarily, as long as it is
# greater than 128 and has not already been chosen.
register("myfloat", 131)
register_op(
create_lower_func({(32, 32): "FloatToCustom32"}), "Cast", "llvm", "float", "myfloat"
)
register_op(
create_lower_func({(32, 32): "Custom32ToFloat"}), "Cast", "llvm", "myfloat", "float"
)
register_op(create_lower_func({32: "Custom32Add"}), "Add", "llvm", "myfloat")
register_op(
create_lower_func(
{
32: "Custom32Sub",
}
),
"Sub",
"llvm",
"myfloat",
)
register_op(create_lower_func({32: "Custom32Mul"}), "Mul", "llvm", "myfloat")
register_op(
create_lower_func(
{
32: "FloatToCustom32",
}
),
"FloatImm",
"llvm",
"myfloat",
)
register_op(
create_lower_func(
{
32: "Custom32Div",
}
),
"Div",
"llvm",
"myfloat",
)
register_op(create_lower_func({32: "Custom32Max"}), "Max", "llvm", "myfloat")
register_op(
create_lower_func({32: "Custom32Sqrt"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.sqrt",
)
register_op(
create_lower_func({32: "Custom32Exp"}), "Call", "llvm", "myfloat", intrinsic_name="tir.exp"
)
register_op(
create_lower_func({32: "Custom32Log"}), "Call", "llvm", "myfloat", intrinsic_name="tir.log"
)
register_op(
create_lower_func({32: "Custom32Sigmoid"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.sigmoid",
)
register_op(
create_lower_func({32: "Custom32Tanh"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.tanh",
)
register_op(lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else")
register_op(
lower_call_pure_extern, "Call", "llvm", "myfloat", intrinsic_name="tir.call_pure_extern"
)
register_min_func(create_min_lower_func({32: "MinCustom32"}, "myfloat"), "myfloat")
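# With the registrations above in place, a Relay expression such as
# relay.cast(x, "custom[myfloat]32") lowers on the llvm target to a call into the
# external "FloatToCustom32" function, and elementwise Add/Sub/Mul/Div/Max on
# "custom[myfloat]32" tensors lower to the corresponding Custom32* externs
# (a summary of the lowering table registered above, assuming TVM's
# custom[<name>]<bits> dtype naming).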
def setup_posites2():
"""Set up tests for posites2
Currently, this registers some custom datatypes using the Bring Your
Own Datatypes framework.
"""
# To use datatype operations in an external library, you should first load
# the library containing the datatype implementation:
# CDLL("libposit.so", RTLD_GLOBAL)
# In this case, the datatype library we are using is built right into TVM,
# so we do not need to explicitly load any library.
# You can pick a code for your datatype arbitrarily, as long as it is
# greater than 128 and has not already been chosen.
register("posites2", 132)
register_op(
create_lower_func(
{
(32, 32): "FloatToPosit32es2",
(32, 16): "FloatToPosit16es2",
(32, 8): "FloatToPosit8es2",
}
),
"Cast",
"llvm",
"float",
"posites2",
)
register_op(
create_lower_func(
{
(32, 32): "Posit32es2ToFloat",
(16, 32): "Posit16es2ToFloat",
(8, 32): "Posit8es2ToFloat",
}
),
"Cast",
"llvm",
"posites2",
"float",
)
register_op(
create_lower_func({32: "Posit32es2Add", 16: "Posit16es2Add", 8: "Posit8es2Add"}),
"Add",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Sub", 16: "Posit16es2Sub", 8: "Posit8es2Sub"}),
"Sub",
"llvm",
"posites2",
)
register_op(
create_lower_func(
{32: "FloatToPosit32es2", 16: "FloatToPosit16es2", 8: "FloatToPosit8es2"}
),
"FloatImm",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Mul", 16: "Posit16es2Mul", 8: "Posit8es2Mul"}),
"Mul",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Div", 16: "Posit16es2Div", 8: "Posit8es2Div"}),
"Div",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Max", 16: "Posit16es2Max", 8: "Posit8es2Max"}),
"Max",
"llvm",
"posites2",
)
register_op(
create_lower_func({32: "Posit32es2Sqrt", 16: "Posit16es2Sqrt", 8: "Posit8es2Sqrt"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.sqrt",
)
register_op(lower_ite, "Call", "llvm", "posites2", intrinsic_name="tir.if_then_else")
register_op(
lower_call_pure_extern, "Call", "llvm", "posites2", intrinsic_name="tir.call_pure_extern"
)
register_op(
create_lower_func({32: "Posit32es2Exp", 16: "Posit16es2Exp", 8: "Posit8es2Exp"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.exp",
)
register_op(
create_lower_func({32: "Posit32es2Log", 16: "Posit16es2Log", 8: "Posit8es2Log"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.log",
)
register_op(
create_lower_func(
{32: "Posit32es2Sigmoid", 16: "Posit16es2Sigmoid", 8: "Posit8es2Sigmoid"}
),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.sigmoid",
)
register_op(
create_lower_func({32: "Posit32es2Tanh", 16: "Posit16es2Tanh", 8: "Posit8es2Tanh"}),
"Call",
"llvm",
"posites2",
intrinsic_name="tir.tanh",
)
register_min_func(
create_min_lower_func(
{32: "MinPosit32es2", 16: "MinPosit16es2", 8: "MinPosit8es2"}, "posites2"
),
"posites2",
)
def run_ops(src_dtype, dst_dtype, rtol=1e-7, atol=1e-7):
"""Run the same op, but with two different datatypes"""
# used for unary ops, first shape in binary ops
shape1 = (5, 10, 5)
# second shape for binary ops
shape2 = (5,)
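    # compare() (defined elsewhere in this module) is expected to run each module with the
    # original src_dtype and again with the tensors converted to dst_dtype, asserting that
    # the two results agree within rtol/atol.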
def check_unary_op(op, src_dtype, dst_dtype, shape):
t1 = relay.TensorType(shape, src_dtype)
x = relay.var("x", t1)
z = op(x)
x_data = rs.rand(*shape).astype(t1.dtype)
module = tvm.IRModule.from_expr(relay.Function([x], z))
compare(module, (x_data,), src_dtype, dst_dtype, rtol, atol)
# test unary ops
for op in [
relay.nn.softmax,
tvm.relay.log,
tvm.relay.exp,
tvm.relay.sqrt,
tvm.relay.rsqrt,
tvm.relay.sigmoid,
tvm.relay.tanh,
relay.nn.relu,
relay.nn.batch_flatten,
]:
check_unary_op(op, src_dtype, dst_dtype, shape1)
# test unary ops over 4d data
for op in [relay.nn.max_pool2d, relay.nn.avg_pool2d, relay.nn.global_avg_pool2d]:
shape_2d = (3, 32, 32, 32)
check_unary_op(op, src_dtype, dst_dtype, shape_2d)
def check_binary_op(opfunc, src_dtype, dst_dtype):
t1 = relay.TensorType(shape1, src_dtype)
t2 = relay.TensorType(shape2, src_dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
z = opfunc(x, y)
x_data = rs.rand(*shape1).astype(t1.dtype)
y_data = rs.rand(*shape2).astype(t2.dtype)
module = tvm.IRModule.from_expr(relay.Function([x, y], z))
compare(module, (x_data, y_data), src_dtype, dst_dtype, rtol, atol)
for op in [
relay.add,
relay.subtract,
relay.divide,
relay.multiply,
]:
check_binary_op(op, src_dtype, dst_dtype)
# we would like to test tvm_if_then_else
# but Relay.IfNode is not lowered to this intrinsic,
    # so, to keep our tests consistent with Relay, we do not unit test it here
# Note: tvm_if_then_else is tested as part of the mobile_net model
def run_model(get_workload, input, src_dtype, dst_dtype, rtol=1e-4, atol=1e-4):
module, params = get_workload()
# we don't generate random data here
# because then the output data would all be around the same value
compare(module, input, src_dtype, dst_dtype, rtol, atol, params)
def run_conv2d(src_dtype, dst_dtype, rtol=1e-7, atol=1e-4):
def run_test_conv2d(
src_dtype,
dst_dtype,
scale,
dshape,
kshape,
padding=(1, 1),
groups=1,
dilation=(1, 1),
**attrs,
):
x = relay.var("x", shape=dshape, dtype=src_dtype)
w = relay.var("w", shape=kshape, dtype=src_dtype)
y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
module = tvm.IRModule.from_expr(relay.Function([x, w], y))
data = rs.uniform(-scale, scale, size=dshape).astype(src_dtype)
kernel = rs.uniform(-scale, scale, size=kshape).astype(src_dtype)
compare(module, (data, kernel), src_dtype, dst_dtype, rtol, atol)
# depthwise conv2d
dshape = (1, 32, 18, 18)
kshape = (32, 1, 3, 3)
run_test_conv2d(
src_dtype,
dst_dtype,
1,
dshape,
kshape,
padding=(1, 1),
channels=32,
groups=32,
kernel_size=(3, 3),
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
from .compatibility_utils import PY2, bstr, utf8_str
if PY2:
range = xrange
import os
import struct
# note: struct pack, unpack, unpack_from all require bytestring format
# data all the way up to at least python 2.7.5, python 3 okay with bytestring
import re
# note: re requires the pattern to be the exact same type as the data to be searched in python3
# but u"" is not allowed for the pattern itself, only b""
from .mobi_index import MobiIndex
from .mobi_utils import fromBase32
from .unipath import pathof
_guide_types = [b'cover',b'title-page',b'toc',b'index',b'glossary',b'acknowledgements',
b'bibliography',b'colophon',b'copyright-page',b'dedication',
b'epigraph',b'foreword',b'loi',b'lot',b'notes',b'preface',b'text']
# locate beginning and ending positions of tag with specific aid attribute
def locate_beg_end_of_tag(ml, aid):
pattern = utf8_str(r'''<[^>]*\said\s*=\s*['"]%s['"][^>]*>''' % aid)
aid_pattern = re.compile(pattern,re.IGNORECASE)
for m in re.finditer(aid_pattern, ml):
plt = m.start()
pgt = ml.find(b'>',plt+1)
return plt, pgt
return 0, 0
# iterate over all tags in block in reverse order, i.e. last tag to first tag
def reverse_tag_iter(block):
end = len(block)
while True:
pgt = block.rfind(b'>', 0, end)
if pgt == -1:
break
plt = block.rfind(b'<', 0, pgt)
if plt == -1:
break
yield block[plt:pgt+1]
end = plt
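# Illustrative sketch, not part of the original module: reverse_tag_iter walks the tags
# of a byte string from the last tag back to the first one.
def _demo_reverse_tag_iter():
    sample = b'<p><b>x</b></p>'
    # yields b'</p>', b'</b>', b'<b>', b'<p>' in that order
    return list(reverse_tag_iter(sample))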
class K8Processor:
def __init__(self, mh, sect, files, debug=False):
self.sect = sect
self.files = files
self.mi = MobiIndex(sect)
self.mh = mh
self.skelidx = mh.skelidx
self.fragidx = mh.fragidx
self.guideidx = mh.guideidx
self.fdst = mh.fdst
self.flowmap = {}
self.flows = None
self.flowinfo = []
self.parts = None
self.partinfo = []
self.linked_aids = set()
        self.fdsttbl = [0, 0xffffffff]
self.DEBUG = debug
# read in and parse the FDST info which is very similar in format to the Palm DB section
# parsing except it provides offsets into rawML file and not the Palm DB file
# this is needed to split up the final css, svg, etc flow section
# that can exist at the end of the rawML file
if self.fdst != 0xffffffff:
header = self.sect.loadSection(self.fdst)
if header[0:4] == b"FDST":
num_sections, = struct.unpack_from(b'>L', header, 0x08)
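                # the table stores num_sections (start, end) offset pairs; keep only the start
                # offsets ([::2]) and append rawSize so the last flow runs to the end of rawML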
self.fdsttbl = struct.unpack_from(bstr('>%dL' % (num_sections*2)), header, 12)[::2] + (mh.rawSize, )
sect.setsectiondescription(self.fdst,"KF8 FDST INDX")
if self.DEBUG:
print("\nFDST Section Map: %d sections" % num_sections)
for j in range(num_sections):
print("Section %d: 0x%08X - 0x%08X" % (j, self.fdsttbl[j],self.fdsttbl[j+1]))
else:
print("\nError: K8 Mobi with Missing FDST info")
# read/process skeleton index info to create the skeleton table
skeltbl = []
if self.skelidx != 0xffffffff:
# for i in range(2):
# fname = 'skel%04d.dat' % i
# data = self.sect.loadSection(self.skelidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
outtbl, ctoc_text = self.mi.getIndexData(self.skelidx, "KF8 Skeleton")
fileptr = 0
for [text, tagMap] in outtbl:
# file number, skeleton name, fragtbl record count, start position, length
skeltbl.append([fileptr, text, tagMap[1][0], tagMap[6][0], tagMap[6][1]])
fileptr += 1
self.skeltbl = skeltbl
if self.DEBUG:
print("\nSkel Table: %d entries" % len(self.skeltbl))
print("table: filenum, skeleton name, frag tbl record count, start position, length")
for j in range(len(self.skeltbl)):
print(self.skeltbl[j])
# read/process the fragment index to create the fragment table
fragtbl = []
if self.fragidx != 0xffffffff:
# for i in range(3):
# fname = 'frag%04d.dat' % i
# data = self.sect.loadSection(self.fragidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
outtbl, ctoc_text = self.mi.getIndexData(self.fragidx, "KF8 Fragment")
for [text, tagMap] in outtbl:
# insert position, ctoc offset (aidtext), file number, sequence number, start position, length
ctocoffset = tagMap[2][0]
ctocdata = ctoc_text[ctocoffset]
fragtbl.append([int(text), ctocdata, tagMap[3][0], tagMap[4][0], tagMap[6][0], tagMap[6][1]])
self.fragtbl = fragtbl
if self.DEBUG:
print("\nFragment Table: %d entries" % len(self.fragtbl))
print("table: file position, link id text, file num, sequence number, start position, length")
for j in range(len(self.fragtbl)):
print(self.fragtbl[j])
# read / process guide index for guide elements of opf
guidetbl = []
if self.guideidx != 0xffffffff:
# for i in range(3):
# fname = 'guide%04d.dat' % i
# data = self.sect.loadSection(self.guideidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
            outtbl, ctoc_text = self.mi.getIndexData(self.guideidx, "KF8 Guide elements")
for [text, tagMap] in outtbl:
# ref_type, ref_title, frag number
ctocoffset = tagMap[1][0]
ref_title = ctoc_text[ctocoffset]
ref_type = text
fileno = None
if 3 in tagMap:
fileno = tagMap[3][0]
if 6 in tagMap:
fileno = tagMap[6][0]
guidetbl.append([ref_type, ref_title, fileno])
self.guidetbl = guidetbl
if self.DEBUG:
print("\nGuide Table: %d entries" % len(self.guidetbl))
print("table: ref_type, ref_title, fragtbl entry number")
for j in range(len(self.guidetbl)):
print(self.guidetbl[j])
def buildParts(self, rawML):
# now split the rawML into its flow pieces
self.flows = []
for j in range(0, len(self.fdsttbl)-1):
start = self.fdsttbl[j]
end = self.fdsttbl[j+1]
self.flows.append(rawML[start:end])
# the first piece represents the xhtml text
text = self.flows[0]
self.flows[0] = b''
# walk the <skeleton> and fragment tables to build original source xhtml files
# *without* destroying any file position information needed for later href processing
# and create final list of file separation start: stop points and etc in partinfo
if self.DEBUG:
print("\nRebuilding flow piece 0: the main body of the ebook")
self.parts = []
self.partinfo = []
fragptr = 0
baseptr = 0
cnt = 0
for [skelnum, skelname, fragcnt, skelpos, skellen] in self.skeltbl:
baseptr = skelpos + skellen
skeleton = text[skelpos: baseptr]
for i in range(fragcnt):
[insertpos, idtext, filenum, seqnum, startpos, length] = self.fragtbl[fragptr]
aidtext = idtext[12:-2]
if i == 0:
filename = 'part%04d.xhtml' % filenum
slice = text[baseptr: baseptr + length]
insertpos = insertpos - skelpos
head = skeleton[:insertpos]
tail = skeleton[insertpos:]
actual_inspos = insertpos
if (tail.find(b'>') < tail.find(b'<') or head.rfind(b'>') < head.rfind(b'<')):
# There is an incomplete tag in either the head or tail.
# This can happen for some badly formed KF8 files
print('The fragment table for %s has incorrect insert position. Calculating manually.' % skelname)
bp, ep = locate_beg_end_of_tag(skeleton, aidtext)
if bp != ep:
actual_inspos = ep + 1 + startpos
if insertpos != actual_inspos:
print("fixed corrupt fragment table insert position", insertpos+skelpos, actual_inspos+skelpos)
insertpos = actual_inspos
self.fragtbl[fragptr][0] = actual_inspos + skelpos
skeleton = skeleton[0:insertpos] + slice + skeleton[insertpos:]
baseptr = baseptr + length
fragptr += 1
cnt += 1
self.parts.append(skeleton)
self.partinfo.append([skelnum, 'Text', filename, skelpos, baseptr, aidtext])
assembled_text = b''.join(self.parts)
if self.DEBUG:
outassembled = os.path.join(self.files.k8dir, 'assembled_text.dat')
with open(pathof(outassembled),'wb') as f:
f.write(assembled_text)
# The primary css style sheet is typically stored next followed by any
# snippets of code that were previously inlined in the
# original xhtml but have been stripped out and placed here.
        # This can include local CDATA snippets and svg sections.
# The problem is that for most browsers and ereaders, you can not
# use <img src="imageXXXX.svg" /> to import any svg image that itself
# properly uses an <image/> tag to import some raster image - it
# should work according to the spec but does not for almost all browsers
# and ereaders and causes epub validation issues because those raster
        # images are in manifest but not in xhtml text - since they are only
        # referenced from an svg image
# So we need to check the remaining flow pieces to see if they are css
# or svg images. if svg images, we must check if they have an <image />
# and if so inline them into the xhtml text pieces.
# there may be other sorts of pieces stored here but until we see one
# in the wild to reverse engineer we won't be able to tell
self.flowinfo.append([None, None, None, None])
svg_tag_pattern = re.compile(br'''(<svg[^>]*>)''', re.IGNORECASE)
image_tag_pattern = re.compile(br'''(<image[^>]*>)''', re.IGNORECASE)
for j in range(1,len(self.flows)):
flowpart = self.flows[j]
nstr = '%04d' % j
m = re.search(svg_tag_pattern, flowpart)
if m is not None:
# svg
ptype = b'svg'
start = m.start()
m2 = re.search(image_tag_pattern, flowpart)
if m2 is not None:
pformat = b'inline'
pdir = None
fname = None
# strip off anything before <svg if inlining
flowpart = flowpart[start:]
else:
pformat = b'file'
pdir = "Images"
fname = 'svgimg' + nstr + '.svg'
else:
# search for CDATA and if exists inline it
if flowpart.find(b'[CDATA[') >= 0:
ptype = b'css'
flowpart = b'<style type="text/css">\n' + flowpart + b'\n</style>\n'
pformat = b'inline'
pdir = None
fname = None
else:
# css - assume as standalone css file
| |
# source repository: mhilton/juju-charm-helpers
import io
import os
import contextlib
import unittest
from copy import copy
from tests.helpers import patch_open
from testtools import TestCase
from mock import MagicMock, patch, call
from charmhelpers.fetch import ubuntu as fetch
from charmhelpers.core.hookenv import flush
import charmhelpers.contrib.openstack.utils as openstack
import six
if not six.PY3:
builtin_open = '__builtin__.open'
builtin_import = '__builtin__.__import__'
else:
builtin_open = 'builtins.open'
builtin_import = 'builtins.__import__'
# mocked return of openstack.lsb_release()
FAKE_RELEASE = {
'DISTRIB_CODENAME': 'precise',
'DISTRIB_RELEASE': '12.04',
'DISTRIB_ID': 'Ubuntu',
'DISTRIB_DESCRIPTION': '"Ubuntu 12.04"'
}
FAKE_REPO = {
# liberty patch release
'neutron-common': {
'pkg_vers': '2:7.0.1-0ubuntu1',
'os_release': 'liberty',
'os_version': '2015.2'
},
# liberty release version
'nova-common': {
'pkg_vers': '2:12.0.0~b1-0ubuntu1',
'os_release': 'liberty',
'os_version': '2015.2'
},
'nova': {
'pkg_vers': '2012.2.3-0ubuntu2.1',
'os_release': 'folsom',
'os_version': '2012.2'
},
'glance-common': {
'pkg_vers': '2012.1.3+stable-20130423-74b067df-0ubuntu1',
'os_release': 'essex',
'os_version': '2012.1'
},
'keystone-common': {
'pkg_vers': '1:2013.1-0ubuntu1.1~cloud0',
'os_release': 'grizzly',
'os_version': '2013.1'
},
# Exercise swift version detection
'swift-storage': {
'pkg_vers': '1.8.0-0ubuntu1',
'os_release': 'grizzly',
'os_version': '1.8.0'
},
'swift-proxy': {
'pkg_vers': '1.13.1-0ubuntu1',
'os_release': 'icehouse',
'os_version': '1.13.1'
},
'swift-common': {
'pkg_vers': '1.10.0~rc1-0ubuntu1',
'os_release': 'havana',
'os_version': '1.10.0'
},
'swift-mitaka-dev': {
'pkg_vers': '2.7.1.dev8.201605111703.trusty-0ubuntu1',
'os_release': 'mitaka',
'os_version': '2.7.0'
},
# a package thats available in the cache but is not installed
'cinder-common': {
'os_release': 'havana',
'os_version': '2013.2'
},
# poorly formed openstack version
'bad-version': {
'pkg_vers': '1:2200.1-0ubuntu1.1~cloud0',
'os_release': None,
'os_version': None
}
}
MOUNTS = [
['/mnt', '/dev/vdb']
]
url = 'deb ' + openstack.CLOUD_ARCHIVE_URL
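# (install source, expected apt source line) pairs for the Ubuntu Cloud Archive pockets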
UCA_SOURCES = [
('cloud:precise-folsom/proposed', url + ' precise-proposed/folsom main'),
('cloud:precise-folsom', url + ' precise-updates/folsom main'),
('cloud:precise-folsom/updates', url + ' precise-updates/folsom main'),
('cloud:precise-grizzly/proposed', url + ' precise-proposed/grizzly main'),
('cloud:precise-grizzly', url + ' precise-updates/grizzly main'),
('cloud:precise-grizzly/updates', url + ' precise-updates/grizzly main'),
('cloud:precise-havana/proposed', url + ' precise-proposed/havana main'),
('cloud:precise-havana', url + ' precise-updates/havana main'),
('cloud:precise-havana/updates', url + ' precise-updates/havana main'),
('cloud:precise-icehouse/proposed',
url + ' precise-proposed/icehouse main'),
('cloud:precise-icehouse', url + ' precise-updates/icehouse main'),
('cloud:precise-icehouse/updates', url + ' precise-updates/icehouse main'),
]
# Mock python-dnspython resolver used by get_host_ip()
class FakeAnswer(object):
def __init__(self, ip):
self.ip = ip
def __str__(self):
return self.ip
class FakeResolver(object):
def __init__(self, ip):
self.ip = ip
def query(self, hostname, query_type):
if self.ip == '':
return []
else:
return [FakeAnswer(self.ip)]
class FakeReverse(object):
def from_address(self, address):
return '172.16.58.3.in-addr.arpa'
class FakeDNSName(object):
def __init__(self, dnsname):
pass
class FakeDNS(object):
def __init__(self, ip):
self.resolver = FakeResolver(ip)
self.reversename = FakeReverse()
self.name = MagicMock()
self.name.Name = FakeDNSName
class OpenStackHelpersTestCase(TestCase):
def _apt_cache(self):
# mocks out the apt cache
def cache_get(package):
pkg = MagicMock()
if package in FAKE_REPO and 'pkg_vers' in FAKE_REPO[package]:
pkg.name = package
pkg.current_ver.ver_str = FAKE_REPO[package]['pkg_vers']
elif (package in FAKE_REPO and
'pkg_vers' not in FAKE_REPO[package]):
pkg.name = package
pkg.current_ver = None
else:
raise KeyError
return pkg
cache = MagicMock()
cache.__getitem__.side_effect = cache_get
return cache
@patch('charmhelpers.contrib.openstack.utils.lsb_release')
def test_os_codename_from_install_source(self, mocked_lsb):
"""Test mapping install source to OpenStack release name"""
mocked_lsb.return_value = FAKE_RELEASE
# the openstack release shipped with respective ubuntu/lsb release.
self.assertEquals(openstack.get_os_codename_install_source('distro'),
'essex')
# proposed pocket
self.assertEquals(openstack.get_os_codename_install_source(
'distro-proposed'),
'essex')
# various cloud archive pockets
src = 'cloud:precise-grizzly'
self.assertEquals(openstack.get_os_codename_install_source(src),
'grizzly')
src = 'cloud:precise-grizzly/proposed'
self.assertEquals(openstack.get_os_codename_install_source(src),
'grizzly')
# ppas and full repo urls.
src = 'ppa:openstack-ubuntu-testing/havana-trunk-testing'
self.assertEquals(openstack.get_os_codename_install_source(src),
'havana')
src = ('deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
'precise-havana main')
self.assertEquals(openstack.get_os_codename_install_source(src),
'havana')
self.assertEquals(openstack.get_os_codename_install_source(None),
'')
@patch.object(openstack, 'get_os_version_codename')
@patch.object(openstack, 'get_os_codename_install_source')
def test_os_version_from_install_source(self, codename, version):
codename.return_value = 'grizzly'
openstack.get_os_version_install_source('cloud:precise-grizzly')
version.assert_called_with('grizzly')
@patch('charmhelpers.contrib.openstack.utils.lsb_release')
def test_os_codename_from_bad_install_source(self, mocked_lsb):
"""Test mapping install source to OpenStack release name"""
_fake_release = copy(FAKE_RELEASE)
_fake_release['DISTRIB_CODENAME'] = 'natty'
mocked_lsb.return_value = _fake_release
_e = 'charmhelpers.contrib.openstack.utils.error_out'
with patch(_e) as mocked_err:
openstack.get_os_codename_install_source('distro')
_er = ('Could not derive openstack release for this Ubuntu '
'release: natty')
mocked_err.assert_called_with(_er)
def test_os_codename_from_version(self):
"""Test mapping OpenStack numerical versions to code name"""
self.assertEquals(openstack.get_os_codename_version('2013.1'),
'grizzly')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_codename_from_bad_version(self, mocked_error):
"""Test mapping a bad OpenStack numerical versions to code name"""
openstack.get_os_codename_version('2014.5.5')
expected_err = ('Could not determine OpenStack codename for '
'version 2014.5.5')
mocked_error.assert_called_with(expected_err)
def test_os_version_from_codename(self):
"""Test mapping a OpenStack codename to numerical version"""
self.assertEquals(openstack.get_os_version_codename('folsom'),
'2012.2')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_version_from_bad_codename(self, mocked_error):
"""Test mapping a bad OpenStack codename to numerical version"""
openstack.get_os_version_codename('foo')
expected_err = 'Could not derive OpenStack version for codename: foo'
mocked_error.assert_called_with(expected_err)
def test_os_version_swift_from_codename(self):
"""Test mapping a swift codename to numerical version"""
self.assertEquals(openstack.get_os_version_codename_swift('liberty'),
'2.5.0')
def test_get_swift_codename_single_version_kilo(self):
self.assertEquals(openstack.get_swift_codename('2.2.2'), 'kilo')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_version_swift_from_bad_codename(self, mocked_error):
"""Test mapping a bad swift codename to numerical version"""
openstack.get_os_version_codename_swift('foo')
expected_err = 'Could not derive swift version for codename: foo'
mocked_error.assert_called_with(expected_err)
def test_get_swift_codename_multiple_versions_liberty(self):
with patch('subprocess.check_output') as _subp:
_subp.return_value = b"... trusty-updates/liberty/main ..."
self.assertEquals(openstack.get_swift_codename('2.5.0'), 'liberty')
def test_get_swift_codename_multiple_versions_mitaka(self):
with patch('subprocess.check_output') as _subp:
_subp.return_value = b"... trusty-updates/mitaka/main ..."
self.assertEquals(openstack.get_swift_codename('2.5.0'), 'mitaka')
def test_get_swift_codename_none(self):
self.assertEquals(openstack.get_swift_codename('1.2.3'), None)
@patch.object(openstack, 'snap_install_requested')
def test_os_codename_from_package(self, mock_snap_install_requested):
"""Test deriving OpenStack codename from an installed package"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
for pkg, vers in six.iteritems(FAKE_REPO):
# test fake repo for all "installed" packages
if pkg.startswith('bad-'):
continue
if 'pkg_vers' not in vers:
continue
self.assertEquals(openstack.get_os_codename_package(pkg),
vers['os_release'])
@patch.object(openstack, 'snap_install_requested')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_codename_from_bad_package_version(self, mocked_error,
mock_snap_install_requested):
"""Test deriving OpenStack codename for a poorly versioned package"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
openstack.get_os_codename_package('bad-version')
_e = ('Could not determine OpenStack codename for version 2200.1')
mocked_error.assert_called_with(_e)
@patch.object(openstack, 'snap_install_requested')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_codename_from_bad_package(self, mocked_error,
mock_snap_install_requested):
"""Test deriving OpenStack codename from an unavailable package"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
try:
openstack.get_os_codename_package('foo')
except Exception:
# ignore exceptions that raise when error_out is mocked
# and doesn't sys.exit(1)
pass
e = 'Could not determine version of package with no installation '\
'candidate: foo'
mocked_error.assert_called_with(e)
@patch.object(openstack, 'snap_install_requested')
def test_os_codename_from_bad_package_nonfatal(
self, mock_snap_install_requested):
"""Test OpenStack codename from an unavailable package is non-fatal"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
self.assertEquals(
None,
openstack.get_os_codename_package('foo', fatal=False)
)
@patch.object(openstack, 'snap_install_requested')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_codename_from_uninstalled_package(self, mock_error,
mock_snap_install_requested):
"""Test OpenStack codename from an available but uninstalled pkg"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
try:
openstack.get_os_codename_package('cinder-common', fatal=True)
except Exception:
pass
e = ('Could not determine version of uninstalled package: '
'cinder-common')
mock_error.assert_called_with(e)
@patch.object(openstack, 'snap_install_requested')
def test_os_codename_from_uninstalled_package_nonfatal(
self, mock_snap_install_requested):
"""Test OpenStack codename from avail uninstalled pkg is non fatal"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
self.assertEquals(
None,
openstack.get_os_codename_package('cinder-common', fatal=False)
)
@patch.object(openstack, 'snap_install_requested')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_version_from_package(self, mocked_error,
mock_snap_install_requested):
"""Test deriving OpenStack version from an installed package"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
for pkg, vers in six.iteritems(FAKE_REPO):
if pkg.startswith('bad-'):
continue
if 'pkg_vers' not in vers:
continue
self.assertEquals(openstack.get_os_version_package(pkg),
vers['os_version'])
@patch.object(openstack, 'snap_install_requested')
@patch('charmhelpers.contrib.openstack.utils.error_out')
def test_os_version_from_bad_package(self, mocked_error,
mock_snap_install_requested):
"""Test deriving OpenStack version from an uninstalled package"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
try:
openstack.get_os_version_package('foo')
except Exception:
# ignore exceptions that raise when error_out is mocked
# and doesn't sys.exit(1)
pass
e = 'Could not determine version of package with no installation '\
'candidate: foo'
mocked_error.assert_called_with(e)
@patch.object(openstack, 'snap_install_requested')
def test_os_version_from_bad_package_nonfatal(
self, mock_snap_install_requested):
"""Test OpenStack version from an uninstalled package is non-fatal"""
mock_snap_install_requested.return_value = False
with patch('apt_pkg.Cache') as cache:
cache.return_value = self._apt_cache()
self.assertEquals(
None,
openstack.get_os_version_package('foo', fatal=False)
)
@patch.object(openstack, 'get_os_codename_package')
@patch('charmhelpers.contrib.openstack.utils.config')
def test_os_release_uncached(self, config, get_cn):
openstack._os_rel = None
get_cn.return_value = 'folsom'
self.assertEquals('folsom', openstack.os_release('nova-common'))
def test_os_release_cached(self):
openstack._os_rel = 'foo'
self.assertEquals('foo', openstack.os_release('nova-common'))
@patch.object(openstack, 'juju_log')
@patch('sys.exit')
def test_error_out(self, mocked_exit, juju_log):
"""Test erroring out"""
openstack.error_out('Everything broke.')
_log = 'FATAL ERROR: Everything broke.'
juju_log.assert_called_with(_log, level='ERROR')
mocked_exit.assert_called_with(1)
def test_get_source_and_pgp_key(self):
tests = {
"source|key": ('source', 'key'),
"source|": ('source', None),
"|key": ('', 'key'),
"source": ('source', None),
}
for k, v in six.iteritems(tests):
self.assertEqual(openstack.get_source_and_pgp_key(k), v)
# These should still work, even though the bulk of the functionality has
# moved to charmhelpers.fetch.add_source()
def test_configure_install_source_distro(self):
"""Test configuring installation from distro"""
self.assertIsNone(openstack.configure_installation_source('distro'))
def test_configure_install_source_ppa(self):
"""Test configuring installation source from PPA"""
with patch('subprocess.check_call') as mock:
src = 'ppa:gandelman-a/openstack'
openstack.configure_installation_source(src)
ex_cmd = [
'add-apt-repository', '--yes', 'ppa:gandelman-a/openstack']
mock.assert_called_with(ex_cmd)
@patch('subprocess.check_call')
@patch.object(fetch, 'import_key')
def test_configure_install_source_deb_url(self, _import, _spcc):
"""Test configuring installation source from deb repo url"""
src = ('deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
'precise-havana main|KEYID')
openstack.configure_installation_source(src)
_import.assert_called_with('KEYID')
_spcc.assert_called_once_with(
['add-apt-repository', '--yes',
'deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
'precise-havana main'])
@patch.object(fetch, 'lsb_release')
@patch(builtin_open)
@patch('subprocess.check_call')
def test_configure_install_source_distro_proposed(
self, _spcc, _open, _lsb):
"""Test configuring installation source from deb repo url"""
_lsb.return_value = FAKE_RELEASE
_file = MagicMock(spec=io.FileIO)
_open.return_value = _file
openstack.configure_installation_source('distro-proposed')
_file.__enter__().write.assert_called_once_with(
'# Proposed\ndeb http://archive.ubuntu.com/ubuntu '
'precise-proposed main universe multiverse restricted\n')
src = ('deb http://archive.ubuntu.com/ubuntu/ precise-proposed '
'restricted main multiverse universe')
openstack.configure_installation_source(src)
_spcc.assert_called_once_with(
['add-apt-repository', '--yes',
'deb http://archive.ubuntu.com/ubuntu/ precise-proposed '
'restricted main multiverse universe'])
@patch('charmhelpers.fetch.filter_installed_packages')
@patch('charmhelpers.fetch.apt_install')
@patch.object(openstack, 'error_out')
@patch.object(openstack, 'juju_log')
def test_add_source_cloud_invalid_pocket(self, _log, _out,
apt_install, filter_pkg):
openstack.configure_installation_source("cloud:havana-updates")
_e = ('Invalid Cloud Archive release specified: '
'havana-updates on this Ubuntuversion')
_s = _out.call_args[0][0]
self.assertTrue(_s.startswith(_e))
@patch.object(fetch, 'filter_installed_packages')
| |
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
def test_parameters_Poisson():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Poisson(1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).poisson(lam=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert 0 < sample
assert param.__str__() == param.__repr__() == "Poisson(Deterministic(int 1))"
for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
count_direct = np.sum(samples_direct == i)
count = np.sum(samples == i)
tolerance = max(count_direct * 0.1, 250)
assert count_direct - tolerance < count < count_direct + tolerance
param = iap.Poisson(1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
def test_parameters_Normal():
reseed()
param = iap.Normal(0, 1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).normal(loc=0, scale=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert param.__str__() == param.__repr__() == "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
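    # clip both sample sets to [-1, 1] so the histograms below compare densities over the
    # same fixed range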
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.Normal(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
param1 = iap.Normal(0, 1)
param2 = iap.Normal(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert 100 - 10 < np.std(samples2) < 100 + 10
param = iap.Normal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Laplace():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Laplace(0, 1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).laplace(loc=0, scale=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert param.__str__() == param.__repr__() == "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.Laplace(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
param1 = iap.Laplace(0, 1)
param2 = iap.Laplace(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
param1 = iap.Laplace(1, 0)
samples = param1.draw_samples((100,))
assert np.all(np.logical_and(
samples > 1 - eps,
samples < 1 + eps
))
param = iap.Laplace(0, 1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_ChiSquare():
reseed()
param = iap.ChiSquare(1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).chisquare(df=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert 0 <= sample
assert np.all(0 <= samples)
assert param.__str__() == param.__repr__() == "ChiSquare(df=Deterministic(int 1))"
samples = np.clip(samples, 0, 3)
samples_direct = np.clip(samples_direct, 0, 3)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 3.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.ChiSquare(iap.Choice([1, 10]))
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if 1 - 1.0 < exp < 1 + 1.0:
seen[0] += 1
elif 10 - 4.0 < exp < 10 + 4.0:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
param1 = iap.ChiSquare(1)
param2 = iap.ChiSquare(10)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
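    # the variance of a chi-square distribution with df degrees of freedom is 2 * df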
assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
param = iap.ChiSquare(1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Weibull():
reseed()
param = iap.Weibull(1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).weibull(a=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert 0 <= sample
assert np.all(0 <= samples)
assert param.__str__() == param.__repr__() == "Weibull(a=Deterministic(int 1))"
samples = np.clip(samples, 0, 2)
samples_direct = np.clip(samples_direct, 0, 2)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 2.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.Weibull(iap.Choice([1, 0.5]))
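    # for a Weibull with unit scale, the expected mean is gamma(1 + 1/a)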
expected_first = scipy.special.gamma(1 + 1/1)
expected_second = scipy.special.gamma(1 + 1/0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((50000,))
observed = np.mean(samples)
if expected_first - 0.2 * expected_first < observed < expected_first + 0.2 * expected_first:
seen[0] += 1
elif expected_second - 0.2 * expected_second < observed < expected_second + 0.2 * expected_second:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
param1 = iap.Weibull(1)
param2 = iap.Weibull(0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
assert np.var(samples1) < np.var(samples2)
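    # unit-scale Weibull variance is gamma(1 + 2/a) - gamma(1 + 1/a)**2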
expected_first = scipy.special.gamma(1 + 2/1) - (scipy.special.gamma(1 + 1/1))**2
expected_second = scipy.special.gamma(1 + 2/0.5) - (scipy.special.gamma(1 + 1/0.5))**2
assert expected_first - 0.2 * expected_first < np.var(samples1) < expected_first + 0.2 * expected_first
assert expected_second - 0.2 * expected_second < np.var(samples2) < expected_second + 0.2 * expected_second
param = iap.Weibull(1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Uniform():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Uniform(0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(0 - eps < samples, samples < 1.0 + eps))
assert param.__str__() == param.__repr__() == "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
samples = param.draw_samples((10000,))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / samples.size
assert density_expected - density_tolerance < density < density_expected + density_tolerance
param = iap.Uniform(-1.0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(-1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(1.0, -1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(-1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(-1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(-1.0, 1.0)
samples1 | |
trees are considered the same if they are structurally
identical, and the nodes have the same value.
Example 1:
(1) (1)
/ \ / \
(2) (3) (2) (3)
Input: p = [1,2,3], q = [1,2,3]
Output: true
Example 2:
Input: p = [1,2], q = [1,null,2]
Output: false
Example 3:
Input: p = [1,2,1], q = [1,1,2]
Output: false
"""
def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:
def fn(p, q):
if not p or not q:
return p is q
return fn(p.left, q.left) and p.val == q.val and fn(p.right, q.right)
return fn(p, q)
r"""
# - Invert/Flip Binary Tree -
# https://leetcode.com/problems/invert-binary-tree/
Given the root of a binary tree, invert the tree, and return its root.
Example 1:
Input: root = [4,2,7,1,3,6,9]
Output: [4,7,2,9,6,3,1]
Example 2:
(2) (2)
/ \ -> / \
(1) (3) (3) (1)
Input: root = [2,1,3]
Output: [2,3,1]
Example 3:
Input: root = []
Output: []
"""
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if not root:
return None
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
r"""
# - Binary Tree Maximum Path Sum -
# https://leetcode.com/problems/binary-tree-maximum-path-sum/
A path in a binary tree is a sequence of nodes where each pair of adjacent
nodes in the sequence has an edge connecting them. A node can only appear
in the sequence at most once. Note that the path does not need to pass
through the root.
The path sum of a path is the sum of the node's values in the path.
Given the root of a binary tree, return the maximum path sum of any non-empty path.
Example 1:
(1)
/ \
(2) (3)
Input: root = [1,2,3]
Output: 6
Explanation: The optimal path is 2 -> 1 -> 3 with a path sum of 2 + 1 + 3 = 6.
# NOTE this is *not* a directed graph, you can go up the tree
Example 2:
Input: root = [-10,9,20,null,null,15,7]
Output: 42
Explanation: The optimal path is 15 -> 20 -> 7 with a path sum of 15 + 20 + 7 = 42.
"""
def maxPathSum(self, root: Optional[TreeNode]) -> int:
def max_gain(node):
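            # max_gain returns the best downward path sum starting at node (negative branches
            # are dropped via max(..., 0)), while updating ans with the best path through node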
nonlocal ans
if not node:
return 0
lgain, rgain = max(max_gain(node.left), 0), max(max_gain(node.right), 0)
new_path = node.val + lgain + rgain
ans = max(ans, new_path)
return node.val + max(lgain, rgain)
ans = -inf
max_gain(root)
return ans # type: ignore
def maxPathSum_(self, root: Optional[TreeNode]) -> int:
def fn(node):
if not node:
return 0, -inf
lh, lps = fn(node.left)
rh, rps = fn(node.right)
return node.val + max(0, lh, rh), max(
lps, rps, node.val + max(0, lh) + max(0, rh)
)
return fn(root)[1] # type: ignore
r"""
# - Binary Tree Level Order Traversal -
# https://leetcode.com/problems/binary-tree-level-order-traversal/
Given the root of a binary tree, return the level order traversal of
its nodes' values. (i.e., from left to right, level by level).
Example 1:
(3)
/ \
(9) (20)
/ \
(15) (7)
Input: root = [3,9,20,null,null,15,7]
Output: [[3],[9,20],[15,7]]
Example 2:
Input: root = [1]
Output: [[1]]
Example 3:
Input: root = []
Output: []
"""
def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
ans, queue = [], [root]
while queue:
newq, vals = [], []
for node in queue:
if node:
vals.append(node.val)
newq.extend([node.left, node.right])
if vals:
ans.append(vals)
queue = newq
return ans
r"""
# - Serialize and Deserialize Binary Tree -
# https://leetcode.com/problems/serialize-and-deserialize-binary-tree/
Serialization is the process of converting a data structure or object into a
sequence of bits so that it can be stored in a file or memory buffer, or
transmitted across a network connection link to be reconstructed later in the
same or another computer environment.
Design an algorithm to serialize and deserialize a binary tree. There is no
restriction on how your serialization/deserialization algorithm should work.
You just need to ensure that a binary tree can be serialized to a string and
this string can be deserialized to the original tree structure.
Clarification: The input/output format is the same as how LeetCode serializes a
binary tree. You do not necessarily need to follow this format, so please be
creative and come up with different approaches yourself.
Example 1:
(1)
/ \
(2) (3)
/ \
(4) (5)
Input: root = [1,2,3,null,null,4,5]
Output: [1,2,3,null,null,4,5]
Example 2:
Input: root = []
Output: []
"""
class Codec:
"""DFS O(n) time O(n) space"""
def serialize(self, root: Optional[TreeNode]) -> str:
def rserialize(root, string):
if root is None:
string += "None,"
else:
string += str(root.val) + ","
string = rserialize(root.left, string)
string = rserialize(root.right, string)
return string
return rserialize(root, "")
def deserialize(self, data: str) -> Optional[TreeNode]:
def rdeserialize(l):
if l[0] == "None":
l.pop(0)
return None
root = TreeNode(l[0])
l.pop(0)
root.left = rdeserialize(l)
root.right = rdeserialize(l)
return root
data_list = data.split(",")
root = rdeserialize(data_list)
return root
class Codec_:
r""" BFS O(n) time and O(n) space, BFS traversal
e.g., 1
/ \
2 5
/ \
3 4 , level order traversal, serialize will be '1,2,5,3,4,None,None,None,None,None,None,'; deserialize
with queue as well, convert back. Time and Space O(n).
"""
def serialize(self, root: Optional[TreeNode]) -> str:
if not root:
return ""
queue: Deque[TreeNode] = deque()
queue.append(root)
ans = ""
while queue:
node = queue.popleft()
if not node:
ans += "None,"
continue
ans += str(node.val) + ","
queue.append(node.left) # type: ignore
queue.append(node.right) # type: ignore
return ans
def deserialize(self, data: str) -> Optional[TreeNode]:
if not data:
return None
ls: Final = data.split(",")
root = TreeNode(int(ls[0]))
queue: Deque[TreeNode] = deque()
queue.append(root)
i = 1
while queue and i < len(ls):
node = queue.popleft()
if ls[i] != "None":
left = TreeNode(int(ls[i]))
node.left = left
queue.append(left)
i += 1
if ls[i] != "None":
right = TreeNode(int(ls[i]))
node.right = right
queue.append(right)
i += 1
return root
r"""
# - Subtree of Another Tree -
# https://leetcode.com/problems/subtree-of-another-tree/
Given the roots of two binary trees `root` and `subRoot`, return `true` if there
is a subtree of `root` with the same structure and node values of `subRoot`
and `false` otherwise.
A subtree of a binary tree `tree` is a tree that consists of a node in `tree`
and all of this node's descendants. The tree `tree` could also be considered
as a subtree of itself.
Example 1:
3 4
/ \ / \
4 5 1 2
/ \
1 2
Input: root = [3,4,5,1,2], subRoot = [4,1,2]
Output: true
Example 2:
Input: root = [3,4,5,1,2,null,null,null,null,0], subRoot = [4,1,2]
Output: false
"""
def isSubtree(self, root: Optional[TreeNode], subRoot: Optional[TreeNode]) -> bool:
def isSame(root, subRoot):
if not root and not subRoot:
return True
if not root or not subRoot:
return False
if root.val != subRoot.val:
return False
return isSame(root.left, subRoot.left) and isSame(root.right, subRoot.right)
if not root:
return False
if isSame(root, subRoot):
return True
return self.isSubtree(root.left, subRoot) or self.isSubtree(root.right, subRoot)
"""
# - Construct Binary Tree from Preorder and Inorder Traversal -
# https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
Given two integer arrays preorder and inorder where preorder is the preorder
traversal of a binary tree and inorder is the inorder traversal of the same
tree, construct and return the binary tree.
Example 1:
Input: preorder = [3,9,20,15,7], inorder = [9,3,15,20,7]
Output: [3,9,20,null,null,15,7]
Example 2:
Input: preorder = [-1], inorder = [-1]
Output: [-1]
# NOTE no duplicates in tree
"""
def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:
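        # the first preorder value is always the current root; its index in inorder splits
        # the remaining values into the left and right subtrees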
if inorder:
index = inorder.index(preorder.pop(0))
root = TreeNode(inorder[index])
root.left = self.buildTree(preorder, inorder[0:index])
root.right = self.buildTree(preorder, inorder[index + 1 :])
return root
return None
r"""
# - Validate Binary Search Tree -
# https://leetcode.com/problems/validate-binary-search-tree/
Given the root of a binary tree, determine if it is a valid binary search
tree (BST).
A valid BST is defined as follows:
- The left subtree of a node contains only nodes with keys less than the
node's key.
- The right subtree of a node contains only nodes with keys greater than the
node's key.
    - Both the left and right subtrees must also be binary search trees.
##### Folder Cleaner
#####
##### © <NAME> - 2020
##### for Python 3
#####
from subprocess import check_output # Using this import just to install the dependencies if they are not already installed.
# I will only use my two libraries filecenter and lifeeasy and will not import anything else after they are installed.
# IMPORTS
try:
import filecenter # Both are my libraries
import lifeeasy
except:
print('It is the first time launching the program')
print('Installing the dependencies...')
command_output = check_output(["pip", "install", "filecenter", "lifeeasy"], universal_newlines=True)
import filecenter
import lifeeasy
print('Successfully installed the dependencies!')
lifeeasy.sleep(2)
# GLOBAL VARIABLES DECLARATION
cleaning_dir = ''
destination_dir = ''
destination_dir_name = ''
unique_number = 1
number_of_items = 0
number_of_moved_items = 0
# FUNCTIONS
def start():
global cleaning_dir
global destination_dir
global destination_dir_name
global unique_number
global number_of_items
lifeeasy.clear()
    if __name__ == '__main__':
print("What's the folder you want to clean today?")
cleaning_dir = input('> ')
else:
cleaning_dir = lifeeasy.working_dir()
if cleaning_dir.lower() == 'cancel' or cleaning_dir.lower() == 'stop' or cleaning_dir.lower() == 'quit' or cleaning_dir.lower() == 'exit':
goodbye(nothing=True)
elif filecenter.isdir(cleaning_dir):
        if cleaning_dir[-1] != '/' and cleaning_dir[-1] != '\\':
if filecenter.os_name() == 'nt':
cleaning_dir = cleaning_dir + '\\'
else:
cleaning_dir = cleaning_dir + '/'
for _ in filecenter.files_in_dir(cleaning_dir):
number_of_items += 1
destination_dir_name = 'Cleaned'
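        # keep appending a counter until the destination folder name is not taken yet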
while filecenter.exists(cleaning_dir + destination_dir_name):
destination_dir_name = destination_dir_name + ' ' + str(unique_number)
unique_number += 1
destination_dir = cleaning_dir + destination_dir_name
filecenter.make_dir(destination_dir)
decide_mode()
else:
lifeeasy.display_action('It seems like you mistyped the path', delay=0.1)
print('Please retry entering the path to your folder')
lifeeasy.sleep(2)
start()
def decide_mode():
lifeeasy.clear()
print('Available options')
print('')
print('nosort > nothing will be sorted in your cleaned up folder')
print('type > each file will be sorted and put in a folder according to its type')
print('')
print('')
print('')
print('How do you want to sort your cleaned folder?')
decision = input('> ')
if decision.lower() == 'cancel' or decision.lower() == 'stop' or decision.lower() == 'quit' or decision.lower() == 'exit':
goodbye(nothing=True)
elif decision.lower() == 'nosort' or decision.lower() == 'osort' or decision.lower() == 'nsort' or decision.lower() == 'noort' or decision.lower() == 'nosrt' or decision.lower() == 'nosot' or decision.lower() == 'nosor':
lifeeasy.display_title('Cleaning your folder')
lifeeasy.display_body(['Chosen mode: No Sorting'])
lifeeasy.display(wait=0.5)
nosort()
elif decision.lower() == 'type' or decision.lower() == 'ype' or decision.lower() == 'tpe' or decision.lower() == 'tye' or decision.lower() == 'typ':
lifeeasy.display_title('Cleaning your folder')
lifeeasy.display_body(['Chosen mode: Type Sorting'])
lifeeasy.display(wait=0.5)
sort_by_type()
else:
print('Sorry I did not understand.')
lifeeasy.sleep(2)
lifeeasy.clear()
decide_mode()
def nosort():
global number_of_moved_items
lifeeasy.display_body(['Chosen mode: No Sorting', 'Completion: 0%'])
list_of_files_in_cleaning_dir = filecenter.files_in_dir(cleaning_dir)
lifeeasy.display_body(['Chosen mode: No Sorting', 'Completion: 1%'])
completion = 0
for file in list_of_files_in_cleaning_dir:
        # spread the progress evenly across the files (0% -> 100%)
        completion += 100 / len(list_of_files_in_cleaning_dir)
        lifeeasy.display_body(['Chosen mode: No Sorting', 'Completion: {}%'.format(round(completion))])
if file == __file__:
continue
if file == destination_dir_name:
continue
filecenter.move(cleaning_dir + file, destination_dir)
number_of_moved_items += 1
lifeeasy.display_body(['Chosen mode: No Sorting', 'Completion: 100%'])
goodbye()
def sort_by_type():
global number_of_moved_items
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 0%'])
# _ are spaces
# è are slashes (replaced by & for path compatibility)
# Images_3D needs to be changed to 3D Images
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 1%'])
Archives = []
Audios = []
Backups = []
eBooks = []
Database_Files = []
Developers = []
Disk_Images = []
Encoded_Files = []
ApplicationsèExecutables = []
Fonts = []
Images_3D = []
Plugins = []
PresetsèSettings = []
Images = []
Raw_Images = []
ROMèGame_Files = []
Spreadsheets = []
System_Files = []
Text_FilesèDocuments = []
Vector_Images = []
Videos = []
Web_Documents = []
Folders = []
Other = []
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 2%'])
list_of_files_in_cleaning_dir = filecenter.files_in_dir(cleaning_dir)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 5%'])
completion = 0
for file in list_of_files_in_cleaning_dir:
        # spread this phase's progress across the files so it lands near the 76% milestone below
        completion += 75 / len(list_of_files_in_cleaning_dir)
        lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: {}%'.format(round(completion))])
if file == __file__:
continue
if file == destination_dir_name:
continue
file_type = filecenter.type_from_extension(filecenter.extension_from_base(file))
if file_type == 'Archive':
Archives.append(file)
elif file_type == 'Audio':
Audios.append(file)
elif file_type == 'Backup':
Backups.append(file)
elif file_type == 'eBook':
eBooks.append(file)
elif file_type == 'Database File':
Database_Files.append(file)
elif file_type == 'Developer':
Developers.append(file)
elif file_type == 'Disk Image':
Disk_Images.append(file)
elif file_type == 'Encoded File':
Encoded_Files.append(file)
elif file_type == 'Application/Executable':
ApplicationsèExecutables.append(file)
elif file_type == 'Font':
Fonts.append(file)
elif file_type == '3D Image':
Images_3D.append(file)
elif file_type == 'Plugin':
Plugins.append(file)
elif file_type == 'Preset/Settings':
PresetsèSettings.append(file)
elif file_type == 'Image':
Images.append(file)
elif file_type == 'Raw Image':
Raw_Images.append(file)
elif file_type == 'ROM/Game File':
ROMèGame_Files.append(file)
elif file_type == 'Spreadsheet':
Spreadsheets.append(file)
elif file_type == 'System File':
System_Files.append(file)
elif file_type == 'Text File/Document':
Text_FilesèDocuments.append(file)
elif file_type == 'Vector Image':
Vector_Images.append(file)
elif file_type == 'Video':
Videos.append(file)
elif file_type == 'Web Document':
Web_Documents.append(file)
elif file_type == 'Folder':
Folders.append(file)
elif file_type == 'Document':
Text_FilesèDocuments.append(file)
else:
Other.append(file)
number_of_moved_items += 1
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 76%'])
if len(Archives) != 0:
archives_path = filecenter.make_dir(destination_dir + '/Archives')
for file in Archives:
filecenter.move(cleaning_dir + file, archives_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 77%'])
if len(Audios) != 0:
Audios_path = filecenter.make_dir(destination_dir + '/Audios')
for file in Audios:
filecenter.move(cleaning_dir + file, Audios_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 78%'])
if len(Backups) != 0:
Backups_path = filecenter.make_dir(destination_dir + '/Backups')
for file in Backups:
filecenter.move(cleaning_dir + file, Backups_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 79%'])
if len(eBooks) != 0:
eBooks_path = filecenter.make_dir(destination_dir + '/eBooks')
for file in eBooks:
filecenter.move(cleaning_dir + file, eBooks_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 80%'])
if len(Database_Files) != 0:
Database_Files_path = filecenter.make_dir(destination_dir + '/Database Files')
for file in Database_Files:
filecenter.move(cleaning_dir + file, Database_Files_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 81%'])
if len(Developers) != 0:
Developers_path = filecenter.make_dir(destination_dir + '/Developers')
for file in Developers:
filecenter.move(cleaning_dir + file, Developers_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 82%'])
if len(Disk_Images) != 0:
Disk_Images_path = filecenter.make_dir(destination_dir + '/Disk Images')
for file in Disk_Images:
filecenter.move(cleaning_dir + file, Disk_Images_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 83%'])
if len(Encoded_Files) != 0:
Encoded_Files_path = filecenter.make_dir(destination_dir + '/Encoded Files')
for file in Encoded_Files:
filecenter.move(cleaning_dir + file, Encoded_Files_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 84%'])
if len(ApplicationsèExecutables) != 0:
ApplicationsèExecutables_path = filecenter.make_dir(destination_dir + '/Applications & Executables')
for file in ApplicationsèExecutables:
filecenter.move(cleaning_dir + file, ApplicationsèExecutables_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 85%'])
if len(Fonts) != 0:
Fonts_path = filecenter.make_dir(destination_dir + '/Fonts')
for file in Fonts:
filecenter.move(cleaning_dir + file, Fonts_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 86%'])
if len(Images_3D) != 0:
Images_3D_path = filecenter.make_dir(destination_dir + '/3D Images')
for file in Images_3D:
filecenter.move(cleaning_dir + file, Images_3D_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 87%'])
if len(Plugins) != 0:
Plugins_path = filecenter.make_dir(destination_dir + '/Plugins')
for file in Plugins:
filecenter.move(cleaning_dir + file, Plugins_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 88%'])
if len(PresetsèSettings) != 0:
PresetsèSettings_path = filecenter.make_dir(destination_dir + '/Presets & Settings')
for file in PresetsèSettings:
filecenter.move(cleaning_dir + file, PresetsèSettings_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 89%'])
if len(Images) != 0:
Images_path = filecenter.make_dir(destination_dir + '/Images')
for file in Images:
filecenter.move(cleaning_dir + file, Images_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 90%'])
if len(Raw_Images) != 0:
Raw_Images_path = filecenter.make_dir(destination_dir + '/Raw Images')
for file in Raw_Images:
filecenter.move(cleaning_dir + file, Raw_Images_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 91%'])
if len(ROMèGame_Files) != 0:
ROMèGame_Files_path = filecenter.make_dir(destination_dir + '/ROM & Game Files')
for file in ROMèGame_Files:
filecenter.move(cleaning_dir + file, ROMèGame_Files_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 92%'])
if len(Spreadsheets) != 0:
Spreadsheets_path = filecenter.make_dir(destination_dir + '/Spreadsheets')
for file in Spreadsheets:
filecenter.move(cleaning_dir + file, Spreadsheets_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 93%'])
if len(System_Files) != 0:
System_Files_path = filecenter.make_dir(destination_dir + '/System Files')
for file in System_Files:
filecenter.move(cleaning_dir + file, System_Files_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 94%'])
if len(Text_FilesèDocuments) != 0:
Text_FilesèDocuments_path = filecenter.make_dir(destination_dir + '/Text Files & Documents')
for file in Text_FilesèDocuments:
filecenter.move(cleaning_dir + file, Text_FilesèDocuments_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 95%'])
if len(Vector_Images) != 0:
Vector_Images_path = filecenter.make_dir(destination_dir + '/Vector Images')
for file in Vector_Images:
filecenter.move(cleaning_dir + file, Vector_Images_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 96%'])
if len(Videos) != 0:
Videos_path = filecenter.make_dir(destination_dir + '/Videos')
for file in Videos:
filecenter.move(cleaning_dir + file, Videos_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 97%'])
if len(Web_Documents) != 0:
Web_Documents_path = filecenter.make_dir(destination_dir + '/Web Documents')
for file in Web_Documents:
filecenter.move(cleaning_dir + file, Web_Documents_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 98%'])
if len(Folders) != 0:
Folders_path = filecenter.make_dir(destination_dir + '/Folders')
for file in Folders:
filecenter.move(cleaning_dir + file, Folders_path)
lifeeasy.display_body(['Chosen mode: Type Sorting', 'Completion: 99%'])
if len(Other) != 0:
        Other_path = filecenter.make_dir(destination_dir + '/Other')
        for file in Other:
            filecenter.move(cleaning_dir + file, Other_path)
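# --- Hedged sketch, not part of the original script ------------------------------------
# The long if/elif chain and the near-identical "make_dir + move" blocks above repeat the
# same pattern for every file type. A table-driven pass over the same filecenter calls
# could replace them; the function name, its signature, and the (shortened) mapping below
# are illustrative assumptions, not taken from the original.
import filecenter

TYPE_TO_FOLDER = {
    'Archive': 'Archives',
    'Audio': 'Audios',
    'Image': 'Images',
    'Video': 'Videos',
    # ...one entry per file type handled above, with 'Other' as the fallback
}

def sort_by_type(files, cleaning_dir, destination_dir):
    buckets = {}
    for file in files:
        file_type = filecenter.type_from_extension(filecenter.extension_from_base(file))
        folder = TYPE_TO_FOLDER.get(file_type, 'Other')
        buckets.setdefault(folder, []).append(file)
    for folder, members in buckets.items():
        # Create each destination folder once, then move its files in one pass.
        target = filecenter.make_dir(destination_dir + '/' + folder)
        for file in members:
            filecenter.move(cleaning_dir + file, target)
# ----------------------------------------------------------------------------------------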
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.contrib.sites.models import Site
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.models import Text
from django.core.cache import cache
from django.core.management.base import CommandError
from django.core.management import call_command
from django.core.urlresolvers import reverse
from cms.api import create_page, add_plugin, create_title
from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_DIRTY
from cms.management.commands import publisher_publish
from cms.models import CMSPlugin, Title
from cms.models.pagemodel import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import SettingsOverrideTestCase as TestCase
from cms.test_utils.util.context_managers import StdoutOverride, SettingsOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import force_language
from cms.utils.compat.dj import get_user_model
class PublisherCommandTests(TestCase):
"""
Tests for the publish command
"""
def test_command_line_should_raise_without_superuser(self):
with self.assertRaises(CommandError):
com = publisher_publish.Command()
com.handle_noargs()
def test_command_line_publishes_zero_pages_on_empty_db(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '123456')
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
def test_command_line_ignores_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
self.assertEqual(Page.objects.public().count(), 0)
def test_command_line_publishes_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
def test_command_line_publishes_selected_language(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '<PASSWORD>')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = True
title.save()
title = create_title('fr', 'fr title', page)
title.published = True
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', language='de')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_command_line_publishes_selected_language_drafts(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '<PASSWORD>')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = False
title.save()
title = create_title('fr', 'fr title', page)
title.published = False
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', language='de', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_table_name_patching(self):
"""
This tests the plugin models patching when publishing from the command line
"""
User = get_user_model()
User.objects.create_superuser('djangocms', '<EMAIL>', '123456')
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
draft.publish('en')
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
# Manually undoing table name patching
Text._meta.db_table = 'djangocms_text_ckeditor_text'
plugin_pool.patched = False
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
def test_command_line_publishes_one_page(self):
"""
Publisher always creates two Page objects for every CMS page,
one is_draft and one is_public.
The public version of the page can be either published or not.
This bit of code uses sometimes manager methods and sometimes manual
filters on purpose (this helps test the managers)
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '<PASSWORD>')
# Now, let's create a page. That actually creates 2 Page objects
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
# Sanity check the database (we should have one draft and one public)
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
# Now check that the non-draft has the attribute we set to the draft.
non_draft = Page.objects.public()[0]
self.assertEqual(non_draft.reverse_id, 'a_test')
def test_command_line_publish_multiple_languages(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '123456')
# Create a draft page with two published titles
page = create_page(u"The page!", "nav_playground.html", "en", published=False)
title = create_title('de', 'ja', page)
title.published = True
title.save()
title = create_title('fr', 'non', page)
title.published = True
title.save()
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de', 'fr'])
def test_command_line_publish_one_site(self):
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '123456')
siteA = Site.objects.create(domain='a.example.com', name='a.example.com')
siteB = Site.objects.create(domain='b.example.com', name='b.example.com')
#example.com
create_page(u"example.com homepage", "nav_playground.html", "en", published=True)
#a.example.com
create_page(u"a.example.com homepage", "nav_playground.html", "de", site=siteA, published=True)
#b.example.com
create_page(u"b.example.com homepage", "nav_playground.html", "de", site=siteB, published=True)
create_page(u"b.example.com about", "nav_playground.html", "nl", site=siteB, published=True)
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', site=siteB.id)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 2)
self.assertEqual(published_from_output, 2)
def test_command_line_publish_multiple_languages_check_count(self):
"""
Publishing one page with multiple languages still counts
as one page. This test case checks whether it works
as expected.
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '<EMAIL>', '<PASSWORD>')
# Now, let's create a page with 2 languages.
page = create_page("en title", "nav_playground.html", "en", published=True)
create_title("de", "de title", page)
page.publish("de")
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
def tearDown(self):
plugin_pool.patched = False
plugin_pool.set_plugin_meta()
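    # --- Hedged sketch, not part of the original test suite ------------------------------
    # Each test above repeats the same loop that splits the captured stdout and pulls the
    # 'Total:' and 'Published:' counts. A small helper such as this one (the method name is
    # illustrative) could replace those loops without changing what is asserted.
    def _parse_publish_output(self, output):
        pages, published = 0, 0
        for line in output.split('\n'):
            if 'Total' in line:
                pages = int(line.split(':')[1])
            elif 'Published' in line:
                published = int(line.split(':')[1])
        return pages, published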
class PublishingTests(TestCase):
def create_page(self, title=None, **kwargs):
return create_page(title or self._testMethodName,
"nav_playground.html", "en", **kwargs)
def test_publish_home(self):
name = self._testMethodName
page = self.create_page(name, published=False)
self.assertFalse(page.publisher_public_id)
self.assertEqual(Page.objects.all().count(), 1)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], "http://testserver/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
def test_publish_single(self):
name = self._testMethodName
page = self.create_page(name, published=False)
self.assertFalse(page.is_published('en'))
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
page.publish("en")
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertTrue(page.is_published('en'))
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
self.assertIsNotNone(page.publisher_public)
self.assertTrue(page.publisher_public_id)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(public, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_publisher_state("en"), 0)
def test_publish_admin(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_publisher_state('en'), 0)
def test_publish_wrong_lang(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with SettingsOverride(
LANGUAGES=(('de', 'de'), ('en', 'en')),
CMS_LANGUAGES={1: [{'code': 'en', 'name': 'en', 'fallbacks': ['fr', 'de'], 'public': True}]}
):
with self.login_user_context(superuser):
with force_language('de'):
response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
def test_publish_child_first(self):
parent = self.create_page('parent', published=False)
child = self.create_page('child', published=False, parent=parent)
parent = parent.reload()
self.assertFalse(parent.is_published('en'))
self.assertFalse(child.is_published('en'))
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published('en')
for name in ('parent', 'child'):
            self.assertObjectExist(drafts, title_set__title=name)
solstice). Invert a
light curve from one orbital phase and you will also fit some
East-West structure of the kernel, like the longitudinal width.
Or invert from two different phases and you fit some North-South
structure, like the **change in** dominant colatitude. So, kernel
characteristics help us estimate constraints on the spin axis without
doing inversions from real data.
Learn more about the kernel and its characteristics with
:func:`Kernel_WidthDomColat`, :func:`Kernels_Plot`,
and :func:`KChar_Evolve_Plot`.
.. note::
Inverting a light curve will depend on the quality of the
observational data. The planet's albedo map matters too:
East-West markings to sense daily brightness changes,
North-South markings to sense longer changes.
We have pre-calculated characteristics stored in numpy binary
files (the obvious two with names ending "values_all5deg.npy").
So, this method rounds inclination, obliquity, and solstice
to the nearest 5 degrees. It also tracks the North (green
circle) or South pole (green diamond) when obliquity is less
than or greater than 90 degrees, respectively.
Args:
phaseD_list (list):
Orbital phases of the planet in degrees. Standard range
is [0, 360). Phases are integers or floats, and list
elements can be
- *phase* for a longitudinal width,
- *[phase, phase]* for a change in dominant colatitude.
which (str):
The param set to use. Can be
- 'pri' for primary (default),
- 'alt' for alternate,
- '_c' for custom, see Note below.
constraint (str):
The type of prediction. Can be
- 'perf' for perfect constraints with no data
uncertainties,
- 'real' to use uncertainties and show {1,2,3}--sigma
regions,
- 'both' (default).
info (bool):
Include a legend subplot. Default is True.
combine (bool):
Join all constraints in a separate subplot. Default is True.
combine_only (bool):
Show **only** the combo constraint. Default is False.
keep_probdata (bool):
Output all probability data, see Returns below. Default
is False.
Optional:
res (int):
Resolution when ``constraint`` is 'real', the number of
probability contours to test. Default is 500.
n_sol (int):
Number of solstice grid points. Default is 361.
n_obl (int):
Number of obliquity grid points. Default is 91.
Very Optional:
**You should probably check out Section 4.1 of S16 before
you change any of these.**
phaseD_sig (float):
Uncertainty on orbital phase, in degrees. Default is 10.0.
incD_sig (float):
Uncertainty on inclination, in degrees. Default is 10.0.
kwid_sig (float):
Uncertainty on longitudinal, width in degrees. Default
is 10.0.
kddc_sig (float):
Uncertainty on change in dominant colatitude, in degrees.
Default is 20.0.
.. note::
Keywords are used by the interactive function :func:`Sandbox_Reflection`.
But if ``which`` is '_c', then enter your custom
params as ``incD_I``, ``solD_I`` and ``oblD_I``.
Standard definitions and formats apply.
See the :class:`class and constructor <DirectImaging_Planet>`
docstrings.
Effect:
Stores this matplotlib figure as ``fig_spin``, **overwriting**
the previous version. You can save the image later by
calling ``fig_spin.savefig(...)``.
Returns:
A list (user_file) if ``keep_probdata`` is True and ``constraint``
is **not** 'perf'.
- First entry is [incD, oblD, solD].
- Other entries are [*id*, 2D PDF, {1,2,3}--sigma
probability levels], where *id* is either a phaseD_list
element or 'Combined'.
"""
## Default keywords
_active = kwargs.get('_active',False)
incD_I = kwargs.get('incD_I',85)
solD_I = kwargs.get('solD_I',0)
oblD_I = kwargs.get('oblD_I',0)
made_combo_flag = False
entries = len(phaseD_list)
if _active:
w,h,sub,s = 3,2,5,5
elif combine_only:
w,h,sub,s = 1,1,1,1
else:
ex = lambda x: 1 if x else 0
sub = entries + ex(info) + ex(combine)
w,h,s = min(sub,3),1+((sub-1)//3),1
p = 0
if which == 'pri':
incD,solD,oblD = self.incD,self.solD,self.oblD
elif which == 'alt':
incD,solD,oblD = self.incD_b,self.solD_b,self.oblD_b
elif which == '_c':
incD,solD,oblD = incD_I,solD_I,oblD_I
mark = 'o'
if oblD > 90.0:
solD,oblD,mark = (solD % 360.0) + 180.0,180.0 - oblD,'D'
i_i,i_s,i_o = round(incD/5),round((solD%360)/5),round(oblD/5)
if keep_probdata:
user_file = [[5*i_i,5*i_o,5*i_s]]
incR,solR,oblR = np.radians(i_i*5),np.radians(i_s*5),np.radians(i_o*5)
incR_sig = np.radians(incD_sig)
combo_prob2d = np.ones(obl_2mesh_.shape)
sigma_probs = np.array([1,0.9973,0.9545,0.6827,0])
new_sols,new_obls = np.meshgrid(np.linspace(0,2.0*pi,n_sol),np.linspace(0,pi/2.0,n_obl),indexing='ij')
if not _active:
plt.figure(figsize=(5*w,5*h))
if info and not combine_only:
s = self._spinax_style(w,h,s,cm.gray,'info','0',new_sols,new_obls,0,0,constraint,
sol_2mesh_,obl_2mesh_,0,0,0,solR,oblR,0,_active,0,entries)
for j in np.arange(entries):
now_phaseD = phaseD_list[j]
if isinstance(now_phaseD,(int,float)):
p += 1
m_c = cm.Reds
i_p = round((now_phaseD%360)/5)
sav_phaseD = 5*i_p
phaseR = np.radians(i_p*5)
phaseR_sig = np.radians(phaseD_sig)
wid_mu = kernel_widths_[i_p,i_i,i_s,i_o]
kchar,k_mu = kernel_widths_[i_p,i_i,:,:],wid_mu
wid_sig = np.radians(kwid_sig)
if constraint in ['real','both']:
prob2d = self._spinax_prob_orignal(kernel_widths_,wid_mu,wid_sig,inc_4mesh_,incR,incR_sig,
phase_4mesh_,phaseR,phaseR_sig,obl_2mesh_,
False,'no','no','no')
else:
prob2d = 1
else:
p += 2
m_c = cm.Blues
i_p,i_p2 = round((now_phaseD[0]%360)/5),round((now_phaseD[1]%360)/5)
sav_phaseD = [5*i_p,5*i_p2]
phaseR,phaseR2 = np.radians(i_p*5),np.radians(i_p2*5)
phaseR_sig = np.radians(phaseD_sig)
dom1,dom2 = kernel_domcolats_[i_p,i_i,i_s,i_o],kernel_domcolats_[i_p2,i_i,i_s,i_o]
ddc_mu = abs(dom1-dom2)
kchar,k_mu = np.absolute(kernel_domcolats_[i_p,i_i,:,:]-kernel_domcolats_[i_p2,i_i,:,:]),ddc_mu
ddc_sig = np.radians(kddc_sig)
if constraint in ['real','both']:
prob2d = self._spinax_prob_orignal(kernel_delta_domcolats_,ddc_mu,ddc_sig,inc_4mesh_,incR,incR_sig,
phase_4mesh_,phaseR,phaseR_sig,obl_2mesh_,
True,shifted_phase_4mesh_,phaseR2,phaseR_sig)
else:
prob2d = 1
if combine or combine_only:
combo_prob2d *= prob2d
if made_combo_flag == False:
axC = plt.subplot(h,w,sub,projection='polar')
made_combo_flag = True
if constraint in ['perf','both']:
if constraint == 'perf':
this_color = m_c(0.33+0.67*(j/entries))
axC.contour(sol_2mesh_,obl_2mesh_,kchar,levels=[k_mu],colors=[this_color],
linewidths=3,linestyles='solid')
else:
axC.contour(sol_2mesh_,obl_2mesh_,kchar,levels=[k_mu],colors=[(0,0.3,0)],alpha=0.2,
linewidths=3,linestyles='dashed')
if constraint in ['real','both']:
new_prob2d = self._spinax_prob_redo(prob2d,sol_2mesh_,obl_2mesh_,new_sols,new_obls)
else:
new_prob2d = 1
if not combine_only:
if constraint in ['real','both']:
levels_sigma = self._spinax_leveling(new_prob2d,sigma_probs,res,new_obls)
if keep_probdata:
user_file.append([sav_phaseD,np.copy(new_prob2d),np.copy(levels_sigma)])
else:
levels_sigma = 1
s = self._spinax_style(w,h,s,m_c,'single','0',new_sols,new_obls,new_prob2d,levels_sigma,constraint,
sol_2mesh_,obl_2mesh_,kchar,k_mu,sav_phaseD,solR,oblR,mark,_active,j,entries)
if combine or combine_only:
if constraint in ['real','both']:
new_combo_prob2d = self._spinax_prob_redo(combo_prob2d,sol_2mesh_,obl_2mesh_,new_sols,new_obls)
levels_sigma = self._spinax_leveling(new_combo_prob2d,sigma_probs.T,res,new_obls)
if keep_probdata:
user_file.append(['Combined',np.copy(new_combo_prob2d),np.copy(levels_sigma)])
else:
new_combo_prob2d,levels_sigma = 1,1
m_c_here = lambda x: cm.Reds if x == entries else (cm.Blues if x == 2*entries else cm.Purples)
s = self._spinax_style(w,h,s,m_c_here(p),'combo',axC,new_sols,new_obls,new_combo_prob2d,levels_sigma,
constraint,sol_2mesh_,obl_2mesh_,kchar,k_mu,0,solR,oblR,mark,_active,0,entries)
if not _active:
plt.tight_layout()
self.fig_spin = plt.gcf()
plt.show()
if keep_probdata:
return user_file
def _savebutton_click(self):
"""Directs a button to save orbital phases."""
if self._pslot_act.value == 'all':
self._pword_act.value = '<center><font color="red">Only save to one slot at a time</font></center>'
else:
word_start = '<center><font color="limegreen">Saved current phase to '
wording = word_start+self._pslot_act.value+' slot</font></center>'
if self._pslot_act.value == 'light':
self._xph_lig = self._orb_act.value
elif self._pslot_act.value == 'medium':
self._xph_med = self._orb_act.value
elif self._pslot_act.value == 'dark':
self._xph_drk = self._orb_act.value
self._pword_act.value = wording
def _clearbutton_click(self):
"""Directs a button to clear orbital phases."""
word_start = '<center><font color="orange">Cleared phase from '+self._pslot_act.value
if self._pslot_act.value == 'all':
self._xph_lig = 'no'
self._xph_med = 'no'
self._xph_drk = 'no'
word_end = ' slots</font></center>'
else:
word_end = ' slot</font></center>'
if self._pslot_act.value == 'light':
self._xph_lig = 'no'
elif self._pslot_act.value == 'medium':
self._xph_med = 'no'
elif self._pslot_act.value == 'dark':
self._xph_drk = 'no'
self._pword_act.value = word_start+word_end
def _check_for_actspin(self,phases,switch):
"""Organizes orbital phases for spin axis constraints."""
new_ph = []
if switch == 'wid':
for p in phases:
if isinstance(p,(int,float)):
new_ph.append(p)
elif switch == 'dom':
c,n = 0,1
for p in phases[1:]:
if isinstance(p,(int,float)):
new_ph.append([phases[c],p])
c = n
n += 1
elif switch == 'both':
c,n = 0,1
lph = len(phases)
for p in phases:
if isinstance(p,(int,float)):
new_ph.append(p)
if (n != lph) and isinstance(phases[n],(int,float)):
new_ph.append([phases[c],phases[n]])
c = n
n += 1
return new_ph
def _actmodule_heart(self,phaseD_I,incD_I,oblD_I,solD_I,ratRO_I,res_I,longzeroD_I,lc_swit,spinax_swit):
"""Sets up and combines several plots about your exoplanet."""
self._pword_act.value = '<center><font color="blue">Ready to save/clear orbital phases</font></center>'
phasesD_single = [phaseD_I,self._xph_lig,self._xph_med,self._xph_drk]
phasesD_forspin = self._check_for_actspin(phasesD_single,spinax_swit)
ph_colors = [(1,0,1),cm.gray(0.6),cm.gray(0.3),cm.gray(0)]
orbT_I = 24.0*360.0
see_spins = abs(ratRO_I)/72.0
num_rel = max(res_I*round(see_spins),self.n_long)
rel_tphase = np.linspace(-2.5,2.5,num_rel)
plt.figure(figsize=(14,9.3))
plt.subplot(232)
self.Geometry_Diagram(which='N/A',_active=True,
incD=incD_I,oblD=oblD_I,solD=solD_I,ratRO=ratRO_I,
phaseD=phasesD_single,ph_colors=ph_colors)
### subplot(231) and subplot(233)
self.Orthographic_Viewer(phaseD_I,show='both',_active=True,
orbT_I=orbT_I,ratRO_I=ratRO_I,
incD_I=incD_I,oblD_I=oblD_I,solD_I=solD_I,
longzeroD_I=longzeroD_I)
plt.subplot(234)
n = 0
for p in phasesD_single:
if isinstance(p,(int,float)):
times_I = orbT_I*((p + rel_tphase)/360.0)
self.LightCurve_Plot(alt=False,show=lc_swit,_active=True,
times_I=times_I,orbT_I=orbT_I,ratRO_I=ratRO_I,
incD_I=incD_I,oblD_I=oblD_I,solD_I=solD_I,
longzeroD_I=longzeroD_I,ph_color=ph_colors[n],now_I=n)
n += 1
n = 0
plt.xlim([-2.5,2.5])
plt.xticks(np.linspace(-2,2,5),relph_ticks_,size='medium')
plt.xlabel('Relative Orbital Phase',size='medium')
plt.yticks(size='medium')
ylab = lambda lc: 'Flux' if lc == 'flux' else ('Apparent Brightness' if lc == 'appar' else '')
plt.ylabel(ylab(lc_swit),size='medium')
plt.gca().set_aspect(1.0/plt.gca().get_data_ratio())
plt.text(0.25,1.01,'Light Curve',color='k',size='medium',ha='center',va='bottom',
transform=plt.gca().transAxes)
plt.text(0.75,1.01,'Rotations: {:.2f}'.format(see_spins),color='k',size='medium',ha='center',va='bottom',
transform=plt.gca().transAxes)
### subplot(236)
self.KChar_Evolve_Plot('both',which='_c',incD=incD_I,oblD=oblD_I,solD=solD_I,
_active=True,phasesD_I=phasesD_single,ph_colors=ph_colors)
plt.text(0.5,1.01,'Kernel Characteristics',color='k',size='medium',ha='center',va='bottom',
transform=plt.gca().transAxes)
### subplot(235,'polar')
if len(phasesD_forspin) == 0:
plt.subplot(235,projection='polar')
plt.gca().set_theta_zero_location('S')
plt.gca().set_rlabel_position(45)
plt.xticks(np.linspace(0,1.75*pi,8),sol_ticks_,size='medium',alpha=0.1) # Match numbers to sol_ticks to avoid error.
plt.yticks(np.linspace(0,pi/2.0,4),obl_ticks_,size='medium',alpha=0.1)
plt.gca().axes.spines['polar'].set_alpha(0.1)
plt.gca().grid(alpha=0.1)
bads = ('SPIN AXIS\nCONSTRAINT WARNING:\n\nYOU NEED\nAT LEAST 2 PHASES TO'
'\nCALCULATE CHANGES IN\nDOMINANT COLATITUDE')
plt.text(np.radians(0),np.radians(0),bads,color=(1.0,0.5,0),size='x-large',
ha='center',va='center',weight='bold')
else:
self.SpinAxis_Constraints(phasesD_forspin,which='_c',constraint='perf',
info=False,combine=False,combine_only=True,_active=True,
incD_I=incD_I,solD_I=solD_I,oblD_I=oblD_I)
plt.text(np.radians(225),np.radians(112),'Spin Axis\nConstraints',color='k',size='medium',
ha='center',va='center')
plt.tight_layout()
self.fig_sand = plt.gcf()
plt.show()
def _reset_actmodule(self):
"""Resets attributes for the interactive module."""
self._xph_lig = 'no'
self._xph_med = 'no'
self._xph_drk = 'no'
self._orb_act.close()
self._inc_act.close()
self._obl_act.close()
self._sol_act.close()
self._ratRO_act.close()
self._res_act.close()
self._zlong_act.close()
self._ligcur_act.close()
self._spax_act.close()
self._pslot_act.close()
self._pword_act.close()
self._psav_act.close()
self._pclr_act.close()
self._title_act.close()
self._orb_act.open()
self._inc_act.open()
self._obl_act.open()
self._sol_act.open()
self._ratRO_act.open()
self._res_act.open()
self._zlong_act.open()
disp_staging_sts_operation = {
k : get_message(v,request.user.get_lang_mode(), showMsgId=False)
for k,v in RuleDefs.DISP_STAGING_STS_OPERATION.items()
}
data = {
'msg': msg,
'now': now,
'staging_list': staging_list,
'staging_history_list': staging_history_list,
'apply_rule_manage_id_dic': apply_rule_manage_id_dic,
'pseudo_rule_manage_id_dic': pseudo_rule_manage_id_dic,
'disp_staging_sts_operation': disp_staging_sts_operation,
'stagingPseudoTargetList': staging_pseudo_target,
'stagingPseudoTargetRuleTypeList': staging_pseudo_target_rule_type,
'rule_ids_stg': rule_ids_stg_admin,
'rule_ids_prd': rule_ids_prd_admin,
'permission_type_stg': perm_type_stg,
'permission_type_prd': perm_type_prd,
'lang_mode': request.user.get_lang_mode(),
}
log_data = {
'staging_list_cnt': len(staging_list),
'apply_rule_manage_ids': list(apply_rule_manage_id_dic.keys()),
'pseudo_rule_manage_ids': list(pseudo_rule_manage_id_dic.keys()),
'stagingPseudoTargetList': staging_pseudo_target,
}
logger.logic_log('LOSI00002', json.dumps(log_data, ensure_ascii=False), request=request)
return render(request, 'rule/rule_staging_data.html', data)
@check_allowed_auth(MENU_ID_PRD, defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_production(request):
"""
    [Method summary]
      Retrieve production data
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
product_list = []
history_list = []
rule_ids_prd_admin = []
permission_type_prd = request.user_config.get_menu_auth_type(MENU_ID_PRD)
try:
filters = {}
if request and request.method == 'POST':
filters = request.POST.get('filters', "{}")
filters = json.loads(filters)
        # Get the rule type IDs for which the user has at least view permission, and add them to the filter conditions
perm_info_prd = request.user_config.get_activerule_auth_type(MENU_ID_PRD)
rule_ids_prd_admin = perm_info_prd[defs.ALLOWED_MENTENANCE]
product_list, history_list = _select_production(filters, perm_info_prd, request)
except:
msg = get_message('MOSJA12000', request.user.get_lang_mode())
logger.system_log('LOSM12000', traceback.format_exc(), request=request)
data = {
'msg': msg,
'product_list': product_list,
'history_list': history_list,
'rule_ids_prd': rule_ids_prd_admin,
'permission_type_prd': permission_type_prd,
'lang_mode': request.user.get_lang_mode(),
}
log_data = {
'product_list_cnt': len(product_list) + len(history_list),
}
logger.logic_log('LOSI00002', json.dumps(log_data, ensure_ascii=False), request=request)
return render(request, 'rule/rule_production_data.html', data)
@check_allowed_auth(MENU_ID_PRD, defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_history(request):
"""
    [Method summary]
      Retrieve production apply-history data
"""
logger.logic_log('LOSI00001', 'None', request=request)
    msg = ''
    rule_history_list = []
    rule_ids_prd_admin = []
try:
filters = {}
if request and request.method == 'POST':
filters = request.POST.get('filters', "{}")
filters = json.loads(filters)
        # Get the rule type IDs for which the user has at least view permission, and add them to the filter conditions
permission_info_prd = request.user_config.get_activerule_auth_type(MENU_ID_PRD)
rule_ids_prd = []
rule_ids_prd_view = permission_info_prd[defs.VIEW_ONLY]
rule_ids_prd_admin = permission_info_prd[defs.ALLOWED_MENTENANCE]
rule_ids_prd.extend(rule_ids_prd_view)
rule_ids_prd.extend(rule_ids_prd_admin)
if 'rule_type_id' not in filters:
filters['rule_type_id'] = {}
if 'LIST' not in filters['rule_type_id']:
filters['rule_type_id']['LIST'] = []
        filters['rule_type_id']['LIST'].extend(rule_ids_prd)
rule_history_list = _select(filters, request)
except:
msg = get_message('MOSJA12000', request.user.get_lang_mode())
logger.system_log('LOSM12000', traceback.format_exc(), request=request)
data = {
'msg': msg,
'history_list': rule_history_list,
'rule_ids_prd': rule_ids_prd_admin,
'lang_mode': request.user.get_lang_mode(),
}
log_data = {
'history_list_cnt': len(rule_history_list),
}
logger.logic_log('LOSI00002', json.dumps(log_data, ensure_ascii=False), request=request)
return render(request, 'rule/rule_history_data.html', data)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
@require_POST
def rule_pseudo_request(request, rule_type_id):
"""
    [Method summary]
      Process the request issued when a test request is executed
"""
logger.logic_log('LOSI00001', 'None', request=request)
err_flg = 1
msg = ''
now = datetime.datetime.now(pytz.timezone('UTC'))
reception_dt = TimeConversion.get_time_conversion(now, 'Asia/Tokyo', request=request)
trace_id = ''
event_dt = '----/--/-- --:--:--'
req_list = []
try:
with transaction.atomic():
            json_str = request.POST.get('json_str', None)
            # Reject a missing payload before attempting to parse it.
            if json_str is None:
                msg = get_message('MOSJA12002', request.user.get_lang_mode())
                logger.user_log('LOSM12007', request=request)
                raise Exception()
            post_data = json.loads(json_str)
            rule_table_name = post_data[EventsRequestCommon.KEY_RULETYPE]
            eventdatetime = post_data[EventsRequestCommon.KEY_EVENTTIME]
            eventinfo = post_data[EventsRequestCommon.KEY_EVENTINFO]
rt = RuleType.objects.get(rule_type_id=rule_type_id)
            # Check per-rule access permissions
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if rt.rule_type_id not in rule_ids:
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':rt.rule_type_name}, log_params=['Send Request', rt.rule_type_id, rule_ids])
            # Validate the submitted information
errmsg_list = []
_validate_eventdatetime(eventdatetime, errmsg_list, request.user.get_lang_mode())
_validate_eventinfo(rule_type_id, eventinfo, errmsg_list, request.user.get_lang_mode())
if len(errmsg_list):
msg = '\n'.join(errmsg_list) + '\n'
logger.system_log('LOSM12064', 'post_data:%s' % (post_data))
raise Exception()
            # Send the request to the REST API
tkn = _get_token(now)
scheme = urlsplit(request.build_absolute_uri(None)).scheme
url = scheme + '://127.0.0.1:' + request.META['SERVER_PORT'] + reverse('web_app:event:eventsrequest')
r = requests.post(
url,
headers={
'content-type' : 'application/json',
'Authorization' : 'Bearer %s' % (tkn),
},
data=json_str.encode('utf-8'),
verify=False
)
            # Extract the data from the response
try:
r_content = json.loads(r.content.decode('utf-8'))
except json.JSONDecodeError:
msg = get_message('MOSJA12012', request.user.get_lang_mode())
logger.user_log('LOSM12052')
raise
            # If the test request failed during execution
if not r_content["result"]:
msg = r_content["msg"]
logger.user_log('LOSM12001', traceback.format_exc())
raise
trace_id = r_content["trace_id"]
            # Lock the target rule type
data_obj_list = DataObject.objects.filter(rule_type_id=rt.pk).order_by('data_object_id')
label_list = []
conditional_name_list = []
for a in data_obj_list:
if a.label not in label_list:
label_list.append(a.label)
conditional_name_list.append(a.conditional_name)
            # Build the data shown in the execution log
req_list = [
{'conditional_name':conditional_name_list[i], 'value':v}
for i, v in enumerate(eventinfo)
]
event_dt = TimeConversion.get_time_conversion_utc(eventdatetime, 'Asia/Tokyo', request=request)
event_dt = TimeConversion.get_time_conversion(event_dt, 'Asia/Tokyo', request=request)
err_flg = 0
msg = get_message('MOSJA12007', request.user.get_lang_mode(), showMsgId=False)
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': msg,
'log_msg': msg,
'trace_id': trace_id,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
return HttpResponse(resp_json, status=None)
except Exception as e:
logger.system_log('LOSM12050', traceback.format_exc(), request=request)
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': get_message('MOSJA12023', request.user.get_lang_mode()),
'log_msg': msg,
'trace_id': trace_id,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
return HttpResponse(resp_json, status=None)
msg = makePseudoCallMessage(msg, reception_dt, event_dt, req_list, request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': get_message('MOSJA12024', request.user.get_lang_mode(), showMsgId=False),
'log_msg': msg,
'trace_id': trace_id,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json)
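# Hedged sketch, not from the original source: the 'json_str' parsed above is expected to
# carry the three EventsRequestCommon keys referenced in this view. A test payload might be
# assembled roughly like this (the literal values are illustrative assumptions only):
#
#     payload = {
#         EventsRequestCommon.KEY_RULETYPE: 'MyRuleTable',
#         EventsRequestCommon.KEY_EVENTTIME: '2020/01/01 00:00:00',
#         EventsRequestCommon.KEY_EVENTINFO: ['condition value 1', 'condition value 2'],
#     }
#     json_str = json.dumps(payload, ensure_ascii=False)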
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_ADMIN)
@require_POST
def rule_change_status(request):
"""
    [Method summary]
      Change the operation status of a staging-applied rule
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
err_flg = 1
try:
        # Check the parameters
status = request.POST.get('status', None)
rule_manage_id = request.POST.get('rule_manage_id', None)
if status is None or rule_manage_id is None:
msg = get_message('MOSJA12002', request.user.get_lang_mode())
logger.user_log('LOSM03005', status, rule_manage_id, request=request)
raise Exception()
status = int(status)
rule_manage_id = int(rule_manage_id)
logger.logic_log('LOSI03000', 'rule_manage_id:%s, status:%s' % (rule_manage_id, status), request=request)
        # Validate the requested status
if status not in RuleDefs.DISP_STAGING_STS_OPERATION:
msg = get_message('MOSJA12002', request.user.get_lang_mode())
logger.user_log('LOSM03001', status, RuleDefs.DISP_STAGING_STS_OPERATION, request=request)
raise Exception()
with transaction.atomic():
            # Lock the target rule management record
rule_manage = RuleManage.objects.select_for_update().get(pk=rule_manage_id)
            # Check per-rule access permissions
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_ADMIN:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if rule_manage.rule_type_id not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=rule_manage.rule_type_id).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12118', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Change Status', rule_manage.rule_type_id, rule_ids])
            # Check the state of the target rule
if rule_manage.request_type_id != defs.STAGING:
msg = get_message('MOSJA12010', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'req_type:%s, expect_type:%s' % (rule_manage.request_type_id, defs.STAGING), request=request)
raise Exception()
if rule_manage.system_status not in RuleDefs.STAGING_OK_STATUSES:
msg = get_message('MOSJA12010', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'sys_sts:%s, expect_sts:%s' % (rule_manage.system_status, RuleDefs.STAGING_OK_STATUSES), request=request)
raise Exception()
if rule_manage.operation_status not in RuleDefs.STAGING_VALIDATE_STATUSES:
msg = get_message('MOSJA12010', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'ope_sts:%s, expect_sts:%s' % (rule_manage.operation_status, RuleDefs.STAGING_VALIDATE_STATUSES), request=request)
raise Exception()
pro_flag = False
rcnt = RuleManage.objects.filter(
rule_type_id=rule_manage.rule_type_id,
request_type_id=defs.PRODUCTION,
rule_file_id=rule_manage.rule_file_id
).exclude(
system_status=defs.RULE_STS_SYSTEM.PRODUCT_NG
).count()
if rcnt == 0 and rule_manage.operation_status == defs.RULE_STS_OPERATION.STAGING:
pro_flag = True
if pro_flag == False and rule_manage.operation_status == defs.RULE_STS_OPERATION.STAGING:
msg = get_message('MOSJA12011', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'pro_count:%s, rule_file_id:%s' % (rcnt, rule_manage.rule_file_id), request=request)
raise Exception()
            # Update the status
rule_manage.operation_status = status
rule_manage.last_update_timestamp = datetime.datetime.now(pytz.timezone('UTC'))
rule_manage.last_update_user = request.user.user_name
rule_manage.save()
msg = get_message('MOSJA12008', request.user.get_lang_mode(), showMsgId=False)
err_flg = 0
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
except RuleManage.DoesNotExist:
logger.system_log('LOSM12002', traceback.format_exc(), request=request)
msg = get_message('MOSJA12009', request.user.get_lang_mode())
except Exception as e:
logger.system_log('LOSM12002', traceback.format_exc(), request=request)
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': msg,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json)
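# Hedged sketch, not from the original source: this view expects the form-encoded fields
# 'status' and 'rule_manage_id' read above and answers with a JSON body shaped like
#     {"err_flg": 0, "msg": "..."}
# so a hypothetical client call could look roughly like
#     requests.post(url, data={'status': 2, 'rule_manage_id': 123})
# where both numeric values are illustrative only.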
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
@require_POST
def rule_get_record(request):
"""
    [Method summary]
      Retrieve the data shown in the staging-rule details view
"""
logger.logic_log('LOSI00001', 'None', request=request)
data = {}
msg = ''
err_flg = 0
try:
        # Check the parameters
rule_manage_id = request.POST.get('rule_manage_id', None)
if rule_manage_id is None:
msg = get_message('MOSJA12002', request.user.get_lang_mode())
raise Exception()
rule_manage_id = int(rule_manage_id)
logger.logic_log('LOSI03000', 'rule_manage_id:%s' % (rule_manage_id), request=request)
rule_manage = RuleManage.objects.get(pk=rule_manage_id)
rule_file_name = RuleFile.objects.get(rule_file_id=rule_manage.rule_file_id).rule_file_name
        # Check per-rule access permissions
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if rule_manage.rule_type_id not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=rule_manage.rule_type_id).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Select Rule', rule_manage.rule_type_id, rule_ids])
        # Staging permissions
permission_info_stg = request.user_config.get_activerule_auth_type(MENU_ID_STG)
rule_ids_stg_admin = permission_info_stg[defs.ALLOWED_MENTENANCE]
except RuleManage.DoesNotExist:
logger.system_log('LOSM12054', traceback.format_exc(), request=request)
msg = get_message('MOSJA12009', request.user.get_lang_mode())
err_flg = 1
except RuleFile.DoesNotExist:
logger.system_log('LOSM12054', traceback.format_exc(), request=request)
msg = get_message('MOSJA12009', request.user.get_lang_mode())
err_flg = 1
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
err_flg = 1
except Exception as e:
logger.system_log('LOSM12054', traceback.format_exc(), request=request)
err_flg = 1
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
if err_flg == 0:
        # Get group information
_, rule_type_dict = _getRuleTypeData(request)
operation_status_str = get_message(RuleDefs.MST_STS_OPERATION[rule_manage.operation_status],request.user.get_lang_mode(), showMsgId=False)
system_status_str = get_message(RuleDefs.MST_STS_SYSTEM[rule_manage.system_status],request.user.get_lang_mode(), showMsgId=False)
if request.user.get_lang_mode() == 'EN':
last_update_timestamp = rule_manage.last_update_timestamp.astimezone(pytz.timezone('Asia/Tokyo')).strftime('%Y, %m, %d, %H:%M')
else:
last_update_timestamp = rule_manage.last_update_timestamp.astimezone(pytz.timezone('Asia/Tokyo')).strftime('%Y年%m月%d日%H:%M')
data = {
'rule_type_id': rule_manage.rule_type_id,
'rule_type_name': rule_type_dict[rule_manage.rule_type_id]['name'],
'rule_table_name': rule_type_dict[rule_manage.rule_type_id]['table'],
'filename': rule_file_name,
'operation_status_id': rule_manage.operation_status,
'operation_status_str': operation_status_str,
'system_status_str': system_status_str,
'rule_ids_stg': rule_ids_stg_admin,
'last_update_user_name': rule_manage.last_update_user,
'last_update_timestamp': last_update_timestamp,
}
resp_json = {
'data': data,
'err_flg': err_flg,
'msg': msg,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_polling(request, rule_manage_id, trace_id):
"""
    [Method summary]
      Process polling requests issued while a test request is running
"""
logger.logic_log('LOSI00001', 'trace_id:%s, manage_id:%s' % (trace_id, rule_manage_id), request=request)
resp_json = {}
err_flg = 1
is_finish = RuleDefs.RULE_FINISH_STS_NG
msg = ''
add_msg = ''
reception_dt = '----/--/-- --:--:--'
event_dt = '----/--/-- --:--:--'
req_list = []
flg = False
try:
with transaction.atomic():
events_request = EventsRequest.objects.get(trace_id=trace_id)
            # Check per-rule access permissions
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if events_request.rule_type_id not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=events_request.rule_type_id).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Polling', events_request.rule_type_id, rule_ids])
            # Get the test request information
evinfo = ast.literal_eval(events_request.event_info)
evinfo = evinfo['EVENT_INFO'] if 'EVENT_INFO' in evinfo else []
rset = DataObject.objects.filter(rule_type_id=events_request.rule_type_id).order_by('data_object_id')
label_list = []
conditional_name_list = []
for a in rset:
if a.label not in label_list:
label_list.append(a.label)
conditional_name_list.append(a.conditional_name)
for rs, v in zip(conditional_name_list, evinfo):
req_list.append({'conditional_name':rs, 'value':v})
reception_dt = events_request.request_reception_time
data over the connection.
# - We really should wrap a do/try loop around this, so if the send fails,
# that will be handled gracefully. (E.g., if the send fails, do we really
# want to raise the 'sent' flag?)
msg.sent.rise() # Announce that this message has been sent (if anyone cares).
#<------
#|-------------------------------------------------------------------------------------
#| Private methods. (for class Connection)
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#|-------------------------------------------------------------------------------------
#|
#| Connection.work_catcherrs() [private instance method]
#|
#| Target method for the worker thread created to handle this
#| connection, which is responsible for sending data out to the
#| connection. Basically, this just wraps exception handling
#| for socket errors around the normal work() method.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def work_catcherrs(self): # This is the target method for the new Worker thread created for this connection, responsible for sends.
logger.debug("Connection.run_catcherrs(): About to do Worker.run() wrapped inside a try/except...")
try:
self.work() # Normal Worker.work() method. This worker serializes sendOut() requests.
except SocketBroken:
logger.warn("Connection.run_catcherrs(): Socket malfunctioned during send; exiting sender thread...")
return
# Don't re-raise because we want to just exit relatively quickly.
#<------
#|^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#| End method Connection.work_catcherrs().
#|------------------------------------------
#|--------------------------------------------------------------------------------------
#|
#| Connection._announce() [private instance method]
#|
#| Announces an incoming (or outgoing) message to this connection's
#| list of message handlers.
#|
#| (Does LineConnection need to override this to get the debug
#| messages right?)
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def _announce(self, msg:Message): # Announce an incoming (or outgoing) message to our list of message handlers.
with msg.lock:
already = msg.announced.rise() # Atomically, was it already announced? & mark it announced.
if already: return # If it was already announced, nothing left to do.
way = 'incoming' if msg.dir == DIR_IN else 'outgoing'
if isinstance(msg.data, str):
plain = msg.data
else:
plain = msg.data.decode()
logger.debug("Connection._announce(): Announcing %s message [%s] to our message handlers..."
% (way, plain.strip()))
for h in self.msgHandlers[::-1]: # For each handler in the list (oldest to newest),
logger.debug("Connection._announce(): Announcing %s message [%s] to a [%s] message handler..."
% (way, plain.strip(), h.name))
h.handle(msg) # Tell it to handle the message.
logger.debug("Connection._announce(): Finished announcing %s message [%s] to message handlers..."
% (way, plain.strip()))
#<------
#|---------------------------------------------------------------------------------------
#|
#| Connection._send() [private instance method]
#|
#| Sends a raw chunk of message data to the remote client over
#| this connection. If there is a socket error while sending,
#| we raise a flag and throw a SocketBroken() exception.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def _send(self, data): # Send the given raw data to the client over this connection.
try:
self.req.send(data) # Tell the socket to send the message data.
except socket.error as e:
            logger.warn("Connection._send(): Socket error [%s] while trying to send to socket." % e)
self.closed.rise() # Consider the socket closed.
# Here, we should probably make sure it is really closed.
raise SocketBroken("Connection._send(): Socket error [%s] while trying to send to socket... Assuming connection is closed." % e)
#<------
#<--
#|^^^^^^^^^^^^^^^^^^^^^^^^
#| End class Connection.
#|------------------------
class Communicator: pass # Temporary forward declaration for use by BaseConnectionHandler
#|==============================================================================
#|
#| CLASS: BaseConnectionHandler [public abstract class]
#|
#| Base class from which connection-handling classes should derive.
#|
#| Users should create their own derived ConnectionHandler classes
#| that derive from this base class, to do what they want with new
#| connections. A typical connection handler might register one or
#| more application-specific message handlers, and/or may initiate
#| other message-producing processes that will insert messages into
#| the connection's outgoing message stream as needed.
#|
#| ABSTRACT METHODS: (Subclasses should implement these.)
#|
#| handle() Given a connection object, the handler object should
#| do whatever it needs to do with that connection.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
class BaseConnectionHandler: # Base class for connection handling classes.
# I commented out the below because in the handle() method
# we can just do conn.comm to get the communicator. So we
# don't really need a direct .comm data member here.
## def __init__(inst, comm:Communicator):
## inst.comm = comm # What Communicator were we created to handle new connections for.
#|---------------------------------------------------------------------------------------
#|
#| BaseConnectionHandler.handle() [abstract public instance method]
#|
#| This is an abstract method, not implemented here. It is part of the
#| generic interface to connection handlers. Classes derived from
#| BaseConnectionHandler should implement this method. The method
#| should accept a newly-created Connection object, then do whatever
#| the connection handler wants to do with that connection.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def handle(inst, conn:Connection): pass # This does nothing. Subclass needs to override it!
#<--
# End class BaseConnectionHandler
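#|==============================================================================
#|
#|  Hedged example (not part of the original module): a minimal subclass
#|  sketch showing the intended use of BaseConnectionHandler. The
#|  EchoMessageHandler class is hypothetical; the only interface assumed
#|  for it is the .name attribute and .handle() method that
#|  Connection._announce() calls above, and registration is sketched as a
#|  direct append to the connection's existing .msgHandlers list.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
class EchoMessageHandler:                           # Hypothetical message handler.
    name = 'echo'
    def handle(self, msg):
        logger.debug("EchoMessageHandler.handle(): Saw message [%s]." % str(msg.data).strip())

class EchoConnectionHandler(BaseConnectionHandler):
    def handle(inst, conn:Connection):              # Called once for each new connection.
        conn.msgHandlers.append(EchoMessageHandler())   # Register a handler on the new connection.
#<--
#| End hedged example.
#|------------------------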
#|==========================================================================
#|
#| CLASS: CommRequestHandler [public class]
#|
#| A request handler (in the sense defined by the module
#| socketserver) that is specific to the needs of the
#| present (communicator) module.
#|
#| This public subclass of the socketserver module's
#| BaseRequestHandler class creates a new Connection
#| object representing the connection request, gives it
#| to all our connection handlers, and then proceeds to
#| indefinitely receive and process messages on the new
#| connection, calling the message handlers for each.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
class CommRequestHandler(socketserver.BaseRequestHandler):
# Extend BaseRequestHandler's __init__ method to accept extra arg conid.
def __init__(self, request, client_address, server, conid):
self.conid = conid
try:
socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
except:
# OK, if the BaseRequestHandler's constructor (which does all the real
# work of handling the request inside this request-handling thread)
# exits by throwing an exception, it probably exited straight from
# .handle() due to a socket error or some such, and therefore the
# .finish() method (which we need to close the terminal window!)
# has no chance to get called. So we call it here.
self.finish()
#-------------------------------------------------------------------
# Method for doing initial setup when the request is first received.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def setup(self, connClass=Connection):
#-------------------------------------------------------------------------------
# The below is commented out because (1) it doesn't work for some reason; and
# (2) now we are setting the thread's role & component in
# Communicator.process_request() instead.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
## # Generate a new unique connection ID number among connections to
## # this server. This is for labeling the terminal window for the
## # connection until we determine what node we're connected to.
##
## conid = self.server.conNum()
## logger.debug("CommRequestHandler.setup(): Setting up to handle a new connection request; connection ID#%d." % conid)
##
## # Define the role of the new handler thread that we're in,
## # for logging purposes.
##
## thread = threading.current_thread()
## thread.role = self.server.role + '.con' + str(conid) + ".rcvr"
## #- This means, the current thread is a connection receiving handler
## # for connection #conid for whatever role this communicator is playing.
## logger.debug("CommRequestHandler.setup(): The role string for this receiver thread is declared to be [%s]." % thread.role)
##
## # Also define 'component' which defines which system component
## # is being handled. (In general, we do not know which component
## # it is until the connecting entity identifies itself, so we put
## # 'unknown' for now. Subclasses may override this.
##
## thread.component = 'unknown'
## logger.debug("CommRequestHandler.setup(): The component string for this receiver thread is declared to be [%s]." % thread.component)
##
## # Install the .update_context() method from ThreadActor onto this
## # (non-ThreadActor) receiver thread object. (Alternatively, we
## # could have overridden the .process_request method of
## # socketserver.TCPServer in Communicator to have it create a
## # ThreadActor from the start, instead of a regular thread. Would
## # that have been a cleaner way to do it?)
##
## thread.update_context = bind(thread, logmaster.ThreadActor.update_context)
## # Go ahead and update the current thread's logging context.
## logger.debug("CommRequestHandler.setup(): Updating the current thread's logging context...")
## thread.update_context()
# Create the new Connection object representing this connection.
# This is done using the connClass optional argument so that subclasses
# of CommRequestHandler can substitute a different connection class
# for the default class Connection, if desired. This is done, for
# example, by LineCommReqHandler.
logger.debug("CommRequestHandler.setup(): Creating connection of class %s...", connClass.__name__)
self.conn = connClass(self.conid, # Sequence number for this connection.
self.server, # The Communicator object invoking this handler.
self.request, # The socket for the request (incoming connection).
self,          # This request handler object itself.
created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only takes effect when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.
"""
return pulumi.get(self, "health_check_node_port")
@health_check_node_port.setter
def health_check_node_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "health_check_node_port", value)
@property
@pulumi.getter(name="ipFamily")
def ip_family(self) -> Optional[pulumi.Input[str]]:
"""
ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
"""
return pulumi.get(self, "ip_family")
@ip_family.setter
def ip_family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_family", value)
@property
@pulumi.getter(name="loadBalancerIP")
def load_balancer_ip(self) -> Optional[pulumi.Input[str]]:
"""
Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.
"""
return pulumi.get(self, "load_balancer_ip")
@load_balancer_ip.setter
def load_balancer_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "load_balancer_ip", value)
@property
@pulumi.getter(name="loadBalancerSourceRanges")
def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
"""
return pulumi.get(self, "load_balancer_source_ranges")
@load_balancer_source_ranges.setter
def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "load_balancer_source_ranges", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]]:
"""
The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="publishNotReadyAddresses")
def publish_not_ready_addresses(self) -> Optional[pulumi.Input[bool]]:
"""
publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.
"""
return pulumi.get(self, "publish_not_ready_addresses")
@publish_not_ready_addresses.setter
def publish_not_ready_addresses(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "publish_not_ready_addresses", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "selector", value)
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> Optional[pulumi.Input[str]]:
"""
Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
"""
return pulumi.get(self, "session_affinity")
@session_affinity.setter
def session_affinity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "session_affinity", value)
@property
@pulumi.getter(name="sessionAffinityConfig")
def session_affinity_config(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']]:
"""
sessionAffinityConfig contains the configurations of session affinity.
"""
return pulumi.get(self, "session_affinity_config")
@session_affinity_config.setter
def session_affinity_config(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']]):
pulumi.set(self, "session_affinity_config", value)
@property
@pulumi.getter(name="topologyKeys")
def topology_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value "*" may be used to mean "any topology". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.
"""
return pulumi.get(self, "topology_keys")
@topology_keys.setter
def topology_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "topology_keys", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. "ExternalName" maps to the specified externalName. "ClusterIP" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is "None", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. "NodePort" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. "LoadBalancer" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecPortsArgs:
def __init__(__self__, *,
port: pulumi.Input[int],
app_protocol: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
node_port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
target_port: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs']] = None):
"""
ServicePort contains information on service's port.
:param pulumi.Input[int] port: The port that will be exposed by this service.
:param pulumi.Input[str] app_protocol: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.
:param pulumi.Input[str] name: The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.
:param pulumi.Input[int] node_port: The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
:param pulumi.Input[str] protocol: The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". Default is TCP.
:param pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs'] target_port: Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
"""
pulumi.set(__self__, "port", port)
if app_protocol is not None:
pulumi.set(__self__, "app_protocol", app_protocol)
if name is not None:
pulumi.set(__self__, "name", name)
if node_port is not None:
pulumi.set(__self__, "node_port", node_port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if target_port is not None:
pulumi.set(__self__, "target_port", target_port)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
The port that will be exposed by this service.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="appProtocol")
def app_protocol(self) -> Optional[pulumi.Input[str]]:
"""
The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.
"""
return pulumi.get(self, "app_protocol")
@app_protocol.setter
def app_protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_protocol", | |
for i in range(len(RBbasis)):
u = RBbasis[i]
if isinstance(bilinear_part, LincombOperator) and self.fin_model is False:
for j, op in enumerate(bilinear_part.operators):
rhs_operators.append(VectorOperator(op.apply(u)))
rhs_coefficients.append(ExpressionParameterFunctional('basis_coefficients[{}]'.format(i),
{'basis_coefficients': (len(RBbasis),)})
* bilinear_part.coefficients[j])
else:
rhs_operators.append(VectorOperator(bilinear_part.apply(u, None)))
rhs_coefficients.append(1. * ExpressionParameterFunctional('basis_coefficients[{}]'.format(i),
{'basis_coefficients': (len(RBbasis),)}))
dual_rhs_operator = LincombOperator(rhs_operators,rhs_coefficients)
dual_intermediate_fom = self.fom.primal_model.with_(rhs = dual_rhs_operator,
parameter_space=None)
dual_reductor = SimpleCoerciveRBReductor(dual_intermediate_fom, RB=self.RBDual,
product=self.opt_product,
coercivity_estimator=self.coercivity_estimator)
non_assembled_reductor = NonAssembledCoerciveRBReductor(dual_intermediate_fom, RB=self.RBDual,
product=self.opt_product, coercivity_estimator=self.coercivity_estimator)
self.non_assembled_dual_rom = non_assembled_reductor.reduce()
dual_rom = dual_reductor.reduce()
return dual_intermediate_fom, dual_rom, dual_reductor
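# Conceptual sketch (plain NumPy, not pyMOR API): the dual right-hand side
# assembled above is an affine decomposition rhs(mu) = sum_j theta_j(mu) * rhs_j,
# where the rhs_j are the fixed VectorOperators collected in `rhs_operators` and
# the theta_j(mu) are the ParameterFunctionals collected in `rhs_coefficients`.
# With hypothetical callables theta_j and plain vectors this would amount to:
#
#     def assemble_affine_rhs(components, coefficients, mu):
#         # components: fixed vectors rhs_j; coefficients: callables theta_j(mu)
#         return sum(theta(mu) * comp for theta, comp in zip(coefficients, components))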
def _build_primal_sensitivity_models(self):
print('building MULTIPLE sensitivity models for {} directions...'.format(self.fom.number_of_parameters))
assert self.primal_rom is not None
assert self.RBPrimal is not None
RBbasis = self.RBPrimal
primal_sensitivity_fom_dict = {}
primal_sensitivity_rom_dict = {}
primal_sensitivity_reductor_dict = {}
for (key,item) in self.primal_fom.parameter_space.parameter_type.items():
index_dict, new_item = self.fom._collect_indices(item)
array_fom = np.empty(new_item, dtype=object)
array_reductor = np.empty(new_item, dtype=object)
array_rom = np.empty(new_item, dtype=object)
for (l, index) in index_dict.items():
if self.unique_basis:
SensitivityBasis = self.RBPrimal
else:
SensitivityBasis = self.RBPrimalSensitivity[key][index]
op_d_mu = self.primal_fom.rhs.d_mu(key, index)
rhs_operators = op_d_mu.operators
rhs_coefficients = op_d_mu.coefficients
for i in range(len(RBbasis)):
u = RBbasis[i]
op = self.primal_fom.operator.d_mu(key,index)
operator = op.with_(operators=[VectorOperator(o.apply(u)) for o in op.operators])
rhs_operators += operator.operators
for l in range(len(operator.operators)):
rhs_coefficients += (operator.coefficients[l] * ExpressionParameterFunctional('-basis_coefficients[{}]'.format(i),
{'basis_coefficients': (len(RBbasis),)}),)
sensitivity_rhs_operator = LincombOperator(rhs_operators,rhs_coefficients)
primal_sensitivity_fom = self.primal_fom.with_(rhs = sensitivity_rhs_operator,
parameter_space=None)
if self.global_greedy is True:
primal_sensitivity_reductor = SimpleCoerciveRBReductor(primal_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
else:
primal_sensitivity_reductor = NonAssembledCoerciveRBReductor(primal_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
primal_sensitivity_rom = primal_sensitivity_reductor.reduce()
array_fom[index] = primal_sensitivity_fom
array_reductor[index] = primal_sensitivity_reductor
array_rom[index] = primal_sensitivity_rom
primal_sensitivity_fom_dict[key] = array_fom
primal_sensitivity_reductor_dict[key] = array_reductor
primal_sensitivity_rom_dict[key] = array_rom
return primal_sensitivity_fom_dict, primal_sensitivity_rom_dict, primal_sensitivity_reductor_dict
def _build_primal_sensitivity_model_for_all_directions(self):
print('building a SINGLE sensitivity model for any direction...')
assert self.primal_rom is not None
assert self.RBPrimal is not None
# assert self.fom.separated_bases is False
RBbasis = self.RBPrimal
SensitivityBasis = self.RBPrimal
rhs_operators = ()
rhs_coefficients = ()
k = 0
for (key,item) in self.primal_fom.parameter_space.parameter_type.items():
index_dict, new_item = self.fom._collect_indices(item)
for (l, index) in index_dict.items():
op_d_mu = self.primal_fom.rhs.d_mu(key, index)
rhs_operators += op_d_mu.operators
factor = ProjectionParameterFunctional('eta', (self.fom.number_of_parameters,), (k,))
rhs_coefficients += tuple([factor * op_ for op_ in op_d_mu.coefficients])
for i in range(len(RBbasis)):
u = RBbasis[i]
op = self.primal_fom.operator.d_mu(key,index)
operator = op.with_(operators=[VectorOperator(o.apply(u)) for o in op.operators])
rhs_operators += operator.operators
for l in range(len(operator.operators)):
rhs_coefficients += (operator.coefficients[l] * ExpressionParameterFunctional('-basis_coefficients[{}]'.format(i),
{'basis_coefficients': (len(RBbasis),)}) * factor,)
k += 1
sensitivity_rhs_operator = LincombOperator(rhs_operators,rhs_coefficients)
primal_sensitivity_fom = self.primal_fom.with_(rhs = sensitivity_rhs_operator,
parameter_space=None)
if self.global_greedy is True:
primal_sensitivity_reductor = SimpleCoerciveRBReductor(primal_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
else:
primal_sensitivity_reductor = NonAssembledCoerciveRBReductor(primal_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
primal_sensitivity_rom = primal_sensitivity_reductor.reduce()
return primal_sensitivity_fom, primal_sensitivity_rom, primal_sensitivity_reductor
def _build_dual_sensitivity_models(self):
# print('build_dual_sens_models')
assert self.primal_rom is not None
assert self.RBPrimal is not None
assert self.RBDual is not None
assert self.RBPrimalSensitivity is not None
RBDual = self.RBDual
RBPrimal = self.RBPrimal
RBSens = self.RBPrimalSensitivity
dual_sensitivity_fom_dict = {}
dual_sensitivity_rom_dict = {}
dual_sensitivity_reductor_dict = {}
d_u_bilinear_part = self.fom.output_functional_dict['d_u_bilinear_part']
d_u_linear_part = self.fom.output_functional_dict['d_u_linear_part']
for (key,item) in self.primal_fom.parameter_space.parameter_type.items():
index_dict, new_item = self.fom._collect_indices(item)
array_fom = np.empty(new_item, dtype=object)
array_reductor = np.empty(new_item, dtype=object)
array_rom = np.empty(new_item, dtype=object)
for (l, index) in index_dict.items():
if self.unique_basis:
SensitivityBasis = self.RBDual
else:
SensitivityBasis = self.RBDualSensitivity[key][index]
rhs_operators = (ZeroOperator(d_u_linear_part.range, d_u_linear_part.source),)
rhs_coefficients = (0,)
# dual residual d_mu part
for i in range(len(RBDual)):
p = RBDual[i]
op = self.primal_fom.operator.d_mu(key,index)
operator = op.with_(operators=[VectorOperator(o.apply(p)) for o in op.operators])
rhs_operators += operator.operators
for l in range(len(operator.operators)):
rhs_coefficients += (operator.coefficients[l] * ExpressionParameterFunctional('-basis_coefficients_dual[{}]'.format(i),
{'basis_coefficients_dual': (len(RBDual),)}),)
if isinstance(d_u_bilinear_part, LincombOperator) and self.fin_model is False:
for i in range(len(RBPrimal)):
u = RBPrimal[i]
k_op = d_u_bilinear_part.d_mu(key,index)
k_operator = k_op.with_(operators=[VectorOperator(o.apply(u)) for o in k_op.operators])
rhs_operators += k_operator.operators
for l in range(len(k_operator.operators)):
rhs_coefficients += (k_operator.coefficients[l] *
ExpressionParameterFunctional('basis_coefficients[{}]'.format(i),
{'basis_coefficients':
(len(RBPrimal),)}),)
if isinstance(d_u_linear_part, LincombOperator) and self.fin_model is False:
j_op = d_u_linear_part.d_mu(key,index)
rhs_operators += j_op.operators
for l in range(len(j_op.operators)):
rhs_coefficients += (j_op.coefficients[l],)
# 2k(q, d_mu_u) part
if self.unique_basis:
for i in range(len(RBSens)):
u_d = RBSens[i]
if isinstance(d_u_bilinear_part, LincombOperator) and self.fin_model is False:
for j, op in enumerate(d_u_bilinear_part.operators):
rhs_operators += (VectorOperator(op.apply(u_d)),)
rhs_coefficients += (ExpressionParameterFunctional('basis_coefficients_primal_sens[{}]'.format(i),
{'basis_coefficients_primal_sens': (len(RBSens),)})
* d_u_bilinear_part.coefficients[j],)
else:
rhs_operators += (VectorOperator(d_u_bilinear_part.apply(u_d), None),)
rhs_coefficients += (ExpressionParameterFunctional('basis_coefficients_primal_sens[{}]'.format(i),
{'basis_coefficients_primal_sens':
(len(RBSens),)}),)
else:
for i in range(len(RBSens[key][index])):
u_d = RBSens[key][index][i]
if isinstance(d_u_bilinear_part, LincombOperator) and self.fin_model is False:
for j, op in enumerate(d_u_bilinear_part.operators):
rhs_operators += (VectorOperator(op.apply(u_d)),)
rhs_coefficients += (ExpressionParameterFunctional('basis_coefficients_primal_sens[{}]'.format(i),
{'basis_coefficients_primal_sens':
(len(RBSens[key][index]),)}) * d_u_bilinear_part.coefficients[j],)
else:
rhs_operators += (VectorOperator(d_u_bilinear_part.apply(u_d, None)),)
rhs_coefficients += (ExpressionParameterFunctional('basis_coefficients_primal_sens[{}]'.format(i),
{'basis_coefficients_primal_sens':
(len(RBSens[key][index]),)}),)
sensitivity_rhs_operator = LincombOperator(rhs_operators,rhs_coefficients)
dual_sensitivity_fom = self.primal_fom.with_(rhs = sensitivity_rhs_operator,
parameter_space=None)
if self.global_greedy is True:
dual_sensitivity_reductor = SimpleCoerciveRBReductor(dual_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
else:
dual_sensitivity_reductor = NonAssembledCoerciveRBReductor(dual_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
dual_sensitivity_rom = dual_sensitivity_reductor.reduce()
array_fom[index] = dual_sensitivity_fom
array_reductor[index] = dual_sensitivity_reductor
array_rom[index] = dual_sensitivity_rom
dual_sensitivity_fom_dict[key] = array_fom
dual_sensitivity_reductor_dict[key] = array_reductor
dual_sensitivity_rom_dict[key] = array_rom
return dual_sensitivity_fom_dict, dual_sensitivity_rom_dict, dual_sensitivity_reductor_dict
def _build_dual_sensitivity_model_for_all_directions(self):
assert self.primal_rom is not None
assert self.RBPrimal is not None
assert self.RBDual is not None
RBDual = self.RBDual
RBPrimal = self.RBPrimal
RBSens = self.RBPrimal
SensitivityBasis = self.RBDual
d_u_bilinear_part = self.fom.output_functional_dict['d_u_bilinear_part']
d_u_linear_part = self.fom.output_functional_dict['d_u_linear_part']
rhs_operators = (ZeroOperator(d_u_linear_part.range,d_u_linear_part.source),)
rhs_coefficients = (0.,)
k = 0
for (key,item) in self.primal_fom.parameter_space.parameter_type.items():
index_dict, new_item = self.fom._collect_indices(item)
for (l, index) in index_dict.items():
factor = ProjectionParameterFunctional('eta', (self.fom.number_of_parameters,), (k,))
# dual residual d_mu part
for i in range(len(RBDual)):
p = RBDual[i]
op = self.primal_fom.operator.d_mu(key,index)
operator = op.with_(operators=[VectorOperator(o.apply(p)) for o in op.operators])
rhs_operators += operator.operators
for l in range(len(operator.operators)):
rhs_coefficients += (operator.coefficients[l] * ExpressionParameterFunctional('-basis_coefficients_dual[{}]'.format(i),
{'basis_coefficients_dual': (len(RBDual),)}) * factor,)
if isinstance(d_u_bilinear_part, LincombOperator) and self.fin_model is False:
for i in range(len(RBPrimal)):
u = RBPrimal[i]
k_op = d_u_bilinear_part.d_mu(key,index)
k_operator = k_op.with_(operators=[VectorOperator(o.apply(u)) for o in k_op.operators])
rhs_operators += k_operator.operators
for l in range(len(k_operator.operators)):
rhs_coefficients += (k_operator.coefficients[l] *
ExpressionParameterFunctional('basis_coefficients[{}]'.format(i),
{'basis_coefficients':
(len(RBPrimal),)}) * factor,)
if isinstance(d_u_linear_part, LincombOperator) and self.fin_model is False:
j_op = d_u_linear_part.d_mu(key,index)
rhs_operators += j_op.operators
for l in range(len(j_op.operators)):
rhs_coefficients += (j_op.coefficients[l] * factor,)
k += 1
# 2k(q, d_mu_u) part
for i in range(len(RBSens)):
u_d = RBSens[i]
if isinstance(d_u_bilinear_part, LincombOperator) and self.fin_model is False:
for j, op in enumerate(d_u_bilinear_part.operators):
rhs_operators += (VectorOperator(op.apply(u_d)),)
rhs_coefficients += (ExpressionParameterFunctional('basis_coefficients_primal_sens[{}]'.format(i),
{'basis_coefficients_primal_sens': (len(RBSens),)})
* d_u_bilinear_part.coefficients[j],)
else:
rhs_operators += (VectorOperator(d_u_bilinear_part.apply(u_d), None),)
rhs_coefficients += (ExpressionParameterFunctional('basis_coefficients_primal_sens[{}]'.format(i),
{'basis_coefficients_primal_sens':
(len(RBSens),)}),)
sensitivity_rhs_operator = LincombOperator(rhs_operators,rhs_coefficients)
dual_sensitivity_fom = self.primal_fom.with_(rhs = sensitivity_rhs_operator,
parameter_space=None)
if self.global_greedy is True:
dual_sensitivity_reductor = SimpleCoerciveRBReductor(dual_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
else:
dual_sensitivity_reductor = NonAssembledCoerciveRBReductor(dual_sensitivity_fom, RB=SensitivityBasis,
product=self.opt_product)
dual_sensitivity_rom = dual_sensitivity_reductor.reduce()
return dual_sensitivity_fom, dual_sensitivity_rom, dual_sensitivity_reductor
def _construct_zero_dict(self, parameter_type):
#prepare dict
zero_dict = {}
for key, item in parameter_type.items():
_, new_item = self.fom._collect_indices(item)
zero_ = np.empty(new_item, dtype=object)
zero_dict[key] = zero_
return zero_dict
#prepare dict
def _construct_zero_dict_dict(self, parameter_type):
zero_dict = {}
for key, item in parameter_type.items():
index_dict, new_item = self.fom._collect_indices(item)
zero_ = np.empty(new_item, dtype=dict)
zero_dict[key] = zero_
for (l, index) in index_dict.items():
zero_dict[key][index] = self._construct_zero_dict(parameter_type)
return zero_dict
def assemble_estimator(self):
# I need the output in advance
self.projected_output = self.project_output()
# print_pieces
print_pieces = 0
estimators = {}
# primal
class PrimalCoerciveRBEstimator(ImmutableObject):
def __init__(self, primal_rom, non_assembled_rom=None):
self.__auto_init(locals())
def estimate(self, U, mu, non_assembled=False):
if non_assembled and self.non_assembled_rom is not None:
return self.non_assembled_rom.estimate(U, mu)
else:
return self.primal_rom.estimate(U, mu)
estimators['primal'] = PrimalCoerciveRBEstimator(self.primal_rom, self.non_assembled_primal_rom)
##########################################
# dual
class DualCoerciveRBEstimator(ImmutableObject):
def __init__(self, coercivity_estimator, cont_k, primal_estimator, dual_rom, non_assembled_rom=None):
self.__auto_init(locals())
def estimate(self, U, P, mu, non_assembled=False):
primal_estimate = self.primal_estimator.estimate(U, mu, non_assembled=non_assembled)[0]
if non_assembled and self.non_assembled_rom is not None:
dual_intermediate_estimate = self.non_assembled_rom.estimate(P, mu)[0]
else:
dual_intermediate_estimate = self.dual_rom.estimate(P, mu)
if print_pieces or 0:
print(self.cont_k(mu), self.coercivity_estimator(mu), primal_estimate, dual_intermediate_estimate)
return 2* self.cont_k(mu) /self.coercivity_estimator(mu) * primal_estimate + dual_intermediate_estimate
estimators['dual'] = DualCoerciveRBEstimator(self.coercivity_estimator, self.cont_k, estimators['primal'], self.dual_rom, self.non_assembled_dual_rom)
##########################################
# output hat
class output_hat_RBEstimator(ImmutableObject):
def __init__(self, coercivity_estimator, cont_k, cont_j, primal_estimator, dual_estimator,
projected_output, dual_rom, P_product, U_product, corrected_output):
self.__auto_init(locals())
def estimate(self, U, P, mu, residual_based=True, both_estimators=False):
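"""
Estimates the error of the reduced output functional. With
residual_based=True the primal and dual estimates are combined as
    est1 = coercivity_estimator(mu) * primal_estimate * dual_estimate
           + cont_k(mu) * primal_estimate**2
    est2 = est1 + |residual_rhs - residual_lhs|
and est1 or est2 (or both) is returned, depending on `corrected_output`
and `both_estimators`. With residual_based=False a coarser bound built
from cont_j(mu), cont_k(mu), the primal estimate and the norm of U is
returned instead.
"""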
if residual_based:
primal_estimate = self.primal_estimator.estimate(U, mu)[0]
dual_estimate = self.dual_estimator.estimate(U, P, mu)
residual_lhs = self.projected_output['primal_dual_projected_op'].apply2(U, P, mu=mu)[0][0]
residual_rhs = self.projected_output['dual_projected_rhs'].apply_adjoint(P, mu=mu).to_numpy()[0][0]
if print_pieces or 0:
print(self.coercivity_estimator(mu), primal_estimate, dual_estimate, primal_estimate**2,
self.cont_k(mu), primal_estimate, self.coercivity_estimator(mu))
if both_estimators:
est1 = self.coercivity_estimator(mu) * primal_estimate * dual_estimate + \
primal_estimate**2 * self.cont_k(mu)
est2 = est1 + np.abs(residual_rhs - residual_lhs)
if self.corrected_output:
return [est1, est2]
else:
return [est2, est1]
if self.corrected_output:
est1 = self.coercivity_estimator(mu) * primal_estimate * dual_estimate + \
primal_estimate**2 * self.cont_k(mu)
return est1
else:
est2 = self.coercivity_estimator(mu) * primal_estimate * dual_estimate + \
primal_estimate**2 * self.cont_k(mu) + \
+ np.abs(residual_rhs - residual_lhs)
return est2
else:
primal_estimate = self.primal_estimator.estimate(U, mu)[0]
norm_U = np.sqrt(self.U_product.apply2(U, U))[0][0]
if print_pieces or 0:
print(primal_estimate, self.cont_j(mu), self.cont_k(mu), norm_U, primal_estimate)
return primal_estimate * ( self.cont_j(mu) + self.cont_k(mu) * \
(2 * norm_U + primal_estimate))
estimators['output_functional_hat'] = output_hat_RBEstimator(self.coercivity_estimator,
self.cont_k, self.cont_j,
estimators['primal'], estimators['dual'],
self.projected_output, self.dual_rom, self.dual_rom.opt_product,
self.primal_rom.opt_product,
self.fom.use_corrected_functional)
##########################################
estimators['u_d_mu'] = None
estimators['p_d_mu'] = None
estimators['output_functional_hat_d_mus'] = None
# sensitivity_u
class
# -*- coding: utf-8 -*-
'''
Module: jelephant.analysis.sta
Contains functions to calculate spike-triggered averages of AnalogSignals.
'''
import numpy as np
import scipy.signal
import quantities as pq
from neo.core import AnalogSignal, AnalogSignalArray
if __name__ == '__main__':
pass
#===============================================================================
# Spike-triggered average main functions
#===============================================================================
def sta(lfps, spiketrains, window, method="correlation", crosstrials=False, single_data=None):
"""
Calls the respective sta function specified by 'method'. 'method' can either be 'correlation' for
correlation-based STA calculation or 'average' for average-based STA calculation.
**Args**:
lfps: AnalogSignal object or AnalogSignalArray object or list of AnalogSignals
spikes: SpikeTrain or list of SpikeTrain objects; its time interval needs to
be completely covered by the lfp
window: positive time interval to specify the cutout around spikes given as Quantity or
number of bins to use
method: default 'correlation'. Specifies method to calculate STA
crosstrials: indicates if STA is averaged over all provided trials or calculated trial-wise
default value 'False'
True: STA is calculated for each pair of lfp and spiketrain and is averaged afterwards
False: STAs are calculated for each pair of lfp and spiketrain and are returned as list
single_data: (None,'train','lfp')
specifies whether one (first) spiketrain is used for all STAs ('train'),
each AnalogSignal comes with its own spiketrain (None, Default) or one (first)
AnalogSignal is used for all spiketrains ('lfp'). Default value 'None'
**Return**:
Returns a tuple (STA,time,used_spikes), where STA is a list of one-dimensional arrays with
the spike triggered average, and time is a list of the corresponding time bins.
The length of the respective array is defined by 2*window + 1, where window is
the number of bins around the spike times used.
used_spikes contains the number of spikes used for the STA. If the spiketrain did
not contain suitable spikes, the returned STA will be filled with zeros.
**Example**:
(result, time, used_spikes) = sta(lfp, spiketrain, Quantity(10, "ms"))
matplotlib.pyplot.plot(time,result)
"""
if single_data == 'lfp':
# ## 1 ### In case of single lfp provided
# wrapping spiketrains
if isinstance(spiketrains, np.ndarray):
box = []
box.append(spiketrains)
spiketrains = box
# if (lfps has useful type)
if (isinstance(lfps, np.ndarray) or isinstance(lfps, list)):
# (itself is data, but also contains lists) or (contains only one list as first element)
if (len(lfps) > 1 and (isinstance(lfps[0], list) or isinstance(lfps[0], np.ndarray))):
pass
elif (len(lfps) == 1 and not (isinstance(lfps[0], list) or isinstance(lfps[0], np.ndarray))):
# unwrapping lfps
lfps = lfps[0]
else:
raise ValueError("There is no single lfp signal present in the supplied lfp signal")
else:
raise ValueError("Supplied LFP does not have the correct data format but %s" % (str(type(lfps))))
loops = len(spiketrains)
result = []
for i in range(loops):
if method == "corr" or method == "correlation":
result.append(sta_corr(lfps, spiketrains[i], window, crosstrials, single_data))
elif method == "aver" or method == "average":
result.append(sta_average(lfps, spiketrains[i], window, crosstrials, single_data))
else:
raise ValueError("Specified STA method is not available. Please use 'correlation' or 'average'")
if single_data == 'lfp':
return averaging_STAs([a[0] for a in result], [a[2] for a in result]), result[0][1], np.sum([a[2] for a in result])
return result[0]
# ## 2 ### normal calling of sta function in case of single_data != 'lfp'
if method == "corr" or method == "correlation":
return (sta_corr(lfps, spiketrains, window, crosstrials, single_data))
elif method == "aver" or method == "average":
return (sta_average(lfps, spiketrains, window, crosstrials, single_data))
else:
raise ValueError("Specified STA method is not available. Please use 'correlation' or 'average'")
def sta_corr(lfps, spiketrains, window, crosstrials=False, single_data=None):
"""
Calculates the respective spike-triggered average of analog signals of multiple trials
by binning the spiketrain and correlating the lfp with the respective binned spiketrain.
Calculates the spike triggered average of a AnalogSignal or AnalogSignalArray object in a
time window +-window around the spike times in a SpikeTrain object.
**Args**:
lfps: AnalogSignal object or AnalogSignalArray object or list of AnalogSignals
spikes: SpikeTrain or list of SpikeTrain objects; its time interval needs to
be completely covered by the lfp
window: positive time interval to specify the cutout around spikes given as Quantity or
number of bins to use
crosstrials: indicates if STA is averaged over all provided trials or calculated trial-wise
**Return**:
Returns a tuple (STA,time,used_spikes), where STA is a list of one-dimensional arrays with
the spike triggered average, and time is a list of the corresponding time bins.
The length of the respective array is defined by 2*window + 1, where window is
the number of bins around the spike times used.
used_spikes contains the number of spikes used for the STA. If the spiketrain did
not contain suitable spikes, the returned STA will be filled with zeros.
**Example**:
(result,time,used_spikes)=sta_corr(lfp,spiketrain,Quantity(10,"ms"))
matplotlib.pyplot.plot(time,result)
"""
# checking compatibility of data, calculating parameters of trials
(lfps, spiketrains, window_times, wrapped, num_trials, window_bins, st_lfp_offsetbins, spiketrainbins) = data_quality_check(lfps, spiketrains, window, crosstrials, single_data)
# create binned spiketrains of spikes in suitable time window
st_binned = []
for trial in np.arange(num_trials):
# binning spiketrain with respect to its starting time
st_binned.append(np.zeros(spiketrainbins[trial], dtype=int))
for t in spiketrains[trial]:
# calculating spikebin from spiketime (respective to spiketrainstart)
spikebin = int(np.round(float(t - spiketrains[trial].t_start) / (spiketrains[trial].t_stop - spiketrains[trial].t_start) * spiketrainbins[trial]))
# checking if lfp signal around spiketime t is available
if spikebin + st_lfp_offsetbins[trial] > window_bins[trial] and len(lfps[trial]) - (st_lfp_offsetbins[trial] + spikebin) > window_bins[trial]:
# adds 1 to the bin corresponding to spiketime t
st_binned[trial][spikebin] += 1
# use the correlation function to calculate the STA
result_sta = []
result_time = []
used_spikes = []
for trial in np.arange(num_trials):
if not np.any(st_binned[trial]):
print("No suitable spikes in trial detected. Reduce window size or supply more LFP data.")
output = np.zeros(2 * window_bins[trial] + 1) * lfps[trial].units
result_sta.append(output)
# used_spikes.append(0)
else:
# cutting correct segment of lfp with respect to additional information outside of spiketrain interval
lfp_start = st_lfp_offsetbins[trial] - window_bins[trial]
pre = []
post = []
if lfp_start < 0:
pre = np.zeros(-lfp_start)
lfp_start = 0
lfp_stop = st_lfp_offsetbins[trial] + spiketrainbins[trial] + window_bins[trial]
if lfp_stop > len(lfps[trial]):
post = np.zeros(lfp_stop - len(lfps[trial]))
lfp_stop = len(lfps[trial])
# appending pre and post for symmetry reasons of correlation
lfp = lfps[trial][lfp_start:lfp_stop]
if len(pre) > 0:
lfp = np.append(pre, lfp)
if len(post) > 0:
lfp = np.append(lfp, post)
# actual calculation of correlation and therefore STA of both signals
output = scipy.signal.correlate(lfp, st_binned[trial], mode='same') / np.sum(st_binned[trial]) * lfps[trial].units
bin_start = int(len(output) / 2) - window_bins[trial]
bin_end = int(len(output) / 2) + window_bins[trial]
# one additional bin to cut STA symmetrically around time = 0
result_sta.append(output[bin_start: bin_end + 1])
result_time.append(np.arange(-window_times[trial], (window_times[trial] + 1 / lfps[trial].sampling_rate).rescale(window_times[trial].units), (1 / lfps[trial].sampling_rate).rescale(window_times[trial].units))[0: 2 * window_bins[trial] + 1] * window_times[trial].units)
used_spikes.append(int(np.sum(st_binned[trial])))
# Averaging over all trials in case of crosstrialing
if crosstrials:
result_sta[0] = averaging_STAs(result_sta, used_spikes)
# Returns array in case only single LFP and spiketrains was passed
if wrapped or crosstrials:
return result_sta[0], result_time[0], used_spikes[0]
else:
return result_sta, result_time, used_spikes
#-------------------------------------------------------------------------------
def sta_average(lfps, spiketrains, window, crosstrials=False, single_data=None):
"""
Calculates the respective spike-triggered average of analog signals of multiple trials
by averaging the respective parts of the lfp signal.
Calculates the spike triggered average of a neo AnalogSignal or AnalogSignalArray object in a
time window +-window around the spike times in a SpikeTrain object. Acts the same as
analysis.sta_corr(lfps, spiketrains, window)
**Args**:
lfps: AnalogSignal object or AnalogSignalArray object
spikes: SpikeTrain or list of SpikeTrains objects
window: positive time interval to specify the cutout around spikes given as Quantity or
number of bins to use
crosstrials: indicates if STA is averaged over all given trials or calculated trial-wise
**Return**:
Returns a tuple (STA,time,used_spikes), where STA is a list of one-dimensional arrays with
the spike triggered average, and time is a list of the corresponding time bins.
The length of the respective array is defined by 2*window + 1, where window is
the number of bins around the spike times used.
used_spikes contains the number of spikes used for the STA. If the spiketrain did
not contain suitable spikes, the returned STA will be filled with zeros.
**Example**:
(result,time,used_spikes)=sta_average([lfp1,lfp2], [spiketrain1,spiketrain2], Quantity(10,"ms"), crosstrials)
matplotlib.pyplot.plot(time,result)
"""
# checking compatibility of data, calculating parameters of trials
(lfps, spiketrains, window_times, wrapped, num_trials, window_bins, st_lfp_offsetbins, spiketrainbins) = data_quality_check(lfps, spiketrains, window, crosstrials, single_data)
# Apache v2 license
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# This file is copied from https://github.com/clovaai/length-adaptive-transformer
# coding=utf-8
# Length-Adaptive Transformer
# Copyright (c) 2020-present NAVER Corp.
# Apache License v2.0
#####
# Original code is from https://github.com/huggingface/transformers
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DistilBERT model
adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
"""
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from transformers.modeling_outputs import (
BaseModelOutput,
SequenceClassifierOutput,
QuestionAnsweringModelOutput,
)
from transformers.models.distilbert.modeling_distilbert import (
_CONFIG_FOR_DOC,
_TOKENIZER_FOR_DOC,
Embeddings,
MultiHeadSelfAttention,
FFN,
DistilBertPreTrainedModel,
DISTILBERT_START_DOCSTRING,
DISTILBERT_INPUTS_DOCSTRING,
)
from length_adaptive_transformer.modeling_utils import expand_gather
class TransformerBlock(nn.Module):
def __init__(self, config):
super().__init__()
assert config.dim % config.n_heads == 0
self.attention = MultiHeadSelfAttention(config)
self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
self.ffn = FFN(config)
self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_length=None,
always_keep_cls_token=True,
):
"""
Parameters
----------
hidden_states: torch.tensor(bs, seq_length, dim)
attention_mask: torch.tensor(bs, seq_length)
Outputs
-------
attention_probs: torch.tensor(bs, n_heads, seq_length, seq_length)
The attention weights
layer_output: torch.tensor(bs, seq_length, dim)
The output of the transformer block contextualization.
"""
# Self-Attention
self_attention_outputs = self.attention(
query=hidden_states,
key=hidden_states,
value=hidden_states,
mask=attention_mask,
head_mask=head_mask,
output_attentions = output_attentions,
)
if output_attentions:
attention_output, attention_probs = self_attention_outputs # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
else: # To handle these `output_attention` or `output_hidden_states` cases returning tuples
assert type(self_attention_outputs) == tuple
attention_output = self_attention_outputs[0]
attention_output = self.sa_layer_norm(attention_output + hidden_states) # (bs, seq_length, dim)
if output_length is not None:
assert output_attentions
significance_score = attention_probs.sum(2).sum(1) # - attention_probs.diagonal(0, 2, 3)
if always_keep_cls_token:
keep_indices = significance_score[:, 1:].topk(output_length - 1, 1)[1] + 1
cls_index = keep_indices.new_zeros((keep_indices.size(0), 1))
keep_indices = torch.cat((cls_index, keep_indices), 1)
else:
keep_indices = significance_score.topk(output_length, 1)[1]
# keep_indices = keep_indices.sort(1)[0]
attention_output = expand_gather(attention_output, 1, keep_indices.unsqueeze(-1))
else:
keep_indices = None
# Feed Forward Network
layer_output = self.ffn(attention_output) # (bs, seq_length, dim)
layer_output = self.output_layer_norm(layer_output + attention_output) # (bs, seq_length, dim)
output = (layer_output,)
if output_attentions:
output = (attention_probs,) + output
return output, keep_indices
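# Illustrative sketch of the significance-based token selection performed in
# TransformerBlock.forward above, on toy tensors. Plain torch.gather is used in
# place of the expand_gather helper, which is assumed to perform the equivalent
# index expansion; the demo function name below is hypothetical.
def _demo_token_pruning():
    bs, n_heads, seq_len, out_len, dim = 2, 4, 8, 5, 16
    attention_probs = torch.rand(bs, n_heads, seq_len, seq_len).softmax(-1)
    hidden = torch.rand(bs, seq_len, dim)
    significance = attention_probs.sum(2).sum(1)             # (bs, seq_len)
    keep = significance[:, 1:].topk(out_len - 1, 1)[1] + 1   # best non-CLS tokens
    keep = torch.cat((keep.new_zeros((bs, 1)), keep), 1)     # always keep the CLS token
    pruned = hidden.gather(1, keep.unsqueeze(-1).expand(-1, -1, dim))
    return pruned.shape                                      # torch.Size([2, 5, 16])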
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.num_hidden_layers = config.n_layers
self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=None,
layer_config=None,
length_config=None,
always_keep_cls_token=True,
):
"""
Parameters
----------
hidden_states: torch.tensor(bs, seq_length, dim)
Input sequence embedded.
attention_mask: torch.tensor(bs, seq_length)
Attention mask on the sequence.
Outputs
-------
hidden_state: torch.tensor(bs, seq_length, dim)
Sequence of hiddens states in the last (top) layer
all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
Tuple of length n_layers with the hidden states from each layer.
Optional: only if output_hidden_states=True
all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
Tuple of length n_layers with the attention weights from each layer
Optional: only if output_attentions=True
"""
bsz, tsz, dim = hidden_states.size()
if length_config is not None:
restored_hidden_states = hidden_states
remain_indices = torch.arange(tsz, device=hidden_states.device).unsqueeze(0).repeat(bsz, 1)
all_hidden_states = () if output_hidden_states else None
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states, )
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if layer_config is not None and i not in layer_config:
continue
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_output_length = length_config[i] if length_config is not None else None
layer_outputs, keep_indices = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
output_attentions,
output_length=layer_output_length,
always_keep_cls_token=always_keep_cls_token,
)
hidden_states = layer_outputs[-1]
if layer_output_length:
remain_indices = remain_indices.gather(1, keep_indices)
restored_hidden_states = restored_hidden_states.scatter(1, remain_indices.unsqueeze(-1).expand(-1, -1, dim), hidden_states)
if attention_mask is not None:
attention_mask = attention_mask.gather(1, keep_indices)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if output_attentions:
assert len(layer_outputs) == 2
attentions = layer_outputs[0]
all_attentions = all_attentions + (attentions,)
else:
assert len(layer_outputs) == 1
last_hidden_state = restored_hidden_states if length_config is not None else hidden_states
if not return_dict:
return tuple(v for v in [last_hidden_state, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=last_hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
)
@add_start_docstrings(
"The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
DISTILBERT_START_DOCSTRING,
)
class DistilBertModel(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = Embeddings(config) # Embeddings
self.transformer = Transformer(config) # Encoder
self.init_weights()
self.length_config = None
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.transformer.layer[layer].attention.prune_heads(heads)
def set_length_config(self, length_config):
self.length_config = length_config
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="distilbert-base-uncased",
output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="distilbert-base-uncased")
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
layer_config=None,
length_config=None,
always_keep_cls_token=True,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layers)
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim)
return self.transformer(
inputs_embeds,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
layer_config=layer_config,
length_config=length_config if length_config is not None else self.length_config,
always_keep_cls_token=always_keep_cls_token,
)
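# Illustrative usage sketch for the length-adaptive DistilBertModel defined above.
# The per-layer length schedule is an arbitrary assumption for demonstration; note
# that output_attentions must be enabled whenever lengths are reduced, because the
# significance scores are derived from the attention probabilities.
def _demo_length_adaptive_forward():
    from transformers import DistilBertConfig
    config = DistilBertConfig()                              # 6 layers, dim 768 by default
    model = DistilBertModel(config)
    model.set_length_config((256, 224, 192, 160, 128, 96))   # hypothetical per-layer lengths
    input_ids = torch.randint(0, config.vocab_size, (1, 384))
    attention_mask = torch.ones_like(input_ids)
    out = model(input_ids, attention_mask=attention_mask, output_attentions=True)
    return out.last_hidden_state.shape                       # pruned states are scattered back: (1, 384, 768)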
@add_start_docstrings(
"""DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
DISTILBERT_START_DOCSTRING,
)
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, config.num_labels)
self.dropout = nn.Dropout(config.seq_classif_dropout)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
layer_config=None,
length_config=None,
always_keep_cls_token=True,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
distilbert_output = self.distilbert(
input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
layer_config=layer_config,
length_config=length_config,
always_keep_cls_token=always_keep_cls_token,
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
pooled_output = self.dropout(pooled_output) # (bs, dim)
logits = self.classifier(pooled_output) # (bs, num_labels)
loss = None
if labels is not None:
if self.num_labels == 1:
loss_fct = nn.MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + distilbert_output[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
@add_start_docstrings(
"""DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
DISTILBERT_START_DOCSTRING,
)
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.qa_outputs = nn.Linear(config.dim, config.num_labels)
assert config.num_labels == 2
self.dropout = nn.Dropout(config.qa_dropout)
self.init_weights()
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="distilbert-base-uncased",
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
layer_config=None,
length_config=None,
always_keep_cls_token=False,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from . import CustomTestCase
from ..xform_utils import (
__find_by_key_value as find_value,
__get_all_paths as get_paths,
__get_avro_primitive_type as get_type,
__get_xform_choices as get_choices,
__get_xform_instance as get_instance,
__get_xform_itexts as get_texts,
__get_xform_label as get_label,
__parse_xml_to_dict as parse_xml_to_dict,
__validate_avro_name as validate_avro_name,
get_instance_data_from_xml,
parse_submission,
parse_xform_file,
parse_xform_to_avro_schema,
validate_xform,
XFormParseError,
)
class XFormUtilsValidatorsTests(CustomTestCase):
def test__validate_xform__not_valid(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(self.samples['xform']['xml-err'])
self.assertIn('Not valid xForm definition.', str(ve.exception), ve)
def test__validate_xform__missing_required__html(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<html></html>
'''
)
self.assertIn('Missing required tags:', str(ve.exception), ve)
self.assertIn('<h:html>', str(ve.exception), ve)
def test__validate_xform__missing_required__html__children(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
</h:html>
'''
)
self.assertIn('Missing required tags:', str(ve.exception), ve)
self.assertIn('<h:body> in <h:html>', str(ve.exception), ve)
self.assertIn('<h:head> in <h:html>', str(ve.exception), ve)
def test__validate_xform__missing_required__head__children(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
</h:head>
<h:body/>
</h:html>
'''
)
self.assertIn('Missing required tags:', str(ve.exception), ve)
self.assertIn('<h:title> in <h:html><h:head>', str(ve.exception), ve)
self.assertIn('<model> in <h:html><h:head>', str(ve.exception), ve)
def test__validate_xform__missing_required__model__children(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
<model>
</model>
</h:head>
<h:body/>
</h:html>
'''
)
self.assertIn('Missing required tags:', str(ve.exception), ve)
self.assertIn('<instance> in <h:html><h:head><model>', str(ve.exception), ve)
def test__validate_xform__no_instance(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
<h:title/>
<model>
<instance>
</instance>
</model>
</h:head>
<h:body/>
</h:html>
'''
)
self.assertIn('Missing required instance definition.', str(ve.exception), ve)
def test__validate_xform__no_title__no_form_id(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
<h:title/>
<model>
<instance>
<A/>
</instance>
</model>
</h:head>
<h:body/>
</h:html>
'''
)
self.assertIn('Missing required form title and instance ID.', str(ve.exception), ve)
def test__validate_xform__no_title__blank(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
<h:title/>
<model>
<instance>
<B id="xform-id-test"/>
</instance>
</model>
</h:head>
<h:body/>
</h:html>
'''
)
self.assertIn('Missing required form title.', str(ve.exception), ve)
def test__validate_xform__no_xform_id(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
<h:title>xForm - Test</h:title>
<model>
<instance>
<None/>
</instance>
</model>
</h:head>
<h:body/>
</h:html>
'''
)
self.assertIn('Missing required instance ID.', str(ve.exception), ve)
def test__validate_xform__no_xform_id__blank(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
<h:title>xForm - Test</h:title>
<model>
<instance>
<C id=""/>
</instance>
</model>
</h:head>
<h:body/>
</h:html>
'''
)
self.assertIn('Missing required instance ID.', str(ve.exception), ve)
def test__validate_xform__with__title__and__xform_id(self):
try:
validate_xform(self.samples['xform']['xml-ok'])
self.assertTrue(True)
except XFormParseError as ve:
self.assertIsNone(ve)
self.assertTrue(False)
def test__validate_xform__bad_calculate_formula(self):
with self.assertRaises(XFormParseError) as ve:
validate_xform(
'''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml"
xmlns:odk="http://www.opendatakit.org/xforms">
<h:head>
<h:title>1 Health care worker registration</h:title>
<model odk:xforms-version="1.0.0">
<instance>
<None id="1_hcw_registration" version="11">
<sms_body/>
</None>
</instance>
<bind
calculate="concat(" ", /None/healthcareworker/identifier_hcw_numbers ,)"
nodeset="/None/sms_body"
type="string"/>
</model>
</h:head>
<h:body>
</h:body>
</h:html>
'''
)
self.assertIn('Invalid calculate', str(ve.exception), ve)
class XFormUtilsParsersTests(CustomTestCase):
def test__parse_xml_to_dict(self):
xml_str = '''
<root attr="a">
<!-- Comments are ignored -->
<a>
Some text
<!-- It does not parse values, everything is a string -->
<b>1</b>
</a>
<a>
<b/>
</a>
<a>
Some text
<b>1</b>
More text (and IT'S IGNORED!!!)
</a>
<!-- This tag below will appear as a None value -->
<a/>
</root>
'''
expected = {
'root': {
'@attr': 'a',
'a': [
{
'#text': 'Some text',
'b': '1',
},
{
'b': None,
},
{
'#text': 'Some text',
'b': '1',
},
None, # Oh!
]
}
}
self.assertEqual(parse_xml_to_dict(xml_str), expected)
def test__parse_xform_file(self):
with open(self.samples['xform']['file-xls'], 'rb') as fp:
xls_content = parse_xform_file('xform.xls', fp)
with open(self.samples['xform']['file-xml'], 'rb') as fp:
xml_content = parse_xform_file('xform.xml', fp)
# From pyxform 1.2.0 this is no longer true ("itexts" ids diverge)
# self.assertEqual(
# parse_xml_to_dict(xls_content),
# parse_xml_to_dict(xml_content),
# 'The XLS form and the XML form should define both the same form'
# )
xls_dict = parse_xml_to_dict(xls_content)
xml_dict = parse_xml_to_dict(xml_content)
self.assertEqual(
xls_dict['h:html']['h:head']['model']['instance'][0],
xml_dict['h:html']['h:head']['model']['instance'][0],
'The XLS form and the XML form should define both the same instance'
)
self.assertEqual(
xls_dict['h:html']['h:body'],
xml_dict['h:html']['h:body'],
'The XLS form and the XML form should define both the same body'
)
def test__parse_submission(self):
with open(self.samples['submission']['file-ok'], 'rb') as xml:
data, form_id, version, instance_id = get_instance_data_from_xml(xml.read())
with open(self.samples['submission']['file-ok-json'], 'rb') as content:
expected = json.load(content)
self.assertEqual(form_id, 'my-test-form')
self.assertEqual(version, 'test-1.0')
self.assertEqual(instance_id, 'uuid:cef69d9d-ebd9-408f-8bc6-9d418bb083d9')
self.assertEqual(len(list(data.keys())), 1)
self.assertEqual(list(data.keys())[0], 'Something_that_is_not_None')
submission = parse_submission(data, self.samples['xform']['raw-xml'])
self.assertNotEqual(list(submission.keys())[0], 'Something_that_is_not_None', submission.keys())
self.assertEqual(submission, expected, json.dumps(submission, indent=2))
def test__parse_submission__with_multilanguage(self):
with open(self.samples['submission']['file-ok'], 'rb') as xml:
data, form_id, version, instance_id = get_instance_data_from_xml(xml.read())
with open(self.samples['submission']['file-ok-json'], 'rb') as content:
expected = json.load(content)
self.assertEqual(form_id, 'my-test-form')
self.assertEqual(version, 'test-1.0')
self.assertEqual(instance_id, 'uuid:cef69d9d-ebd9-408f-8bc6-9d418bb083d9')
self.assertEqual(len(list(data.keys())), 1)
self.assertEqual(list(data.keys())[0], 'Something_that_is_not_None')
# this form definition has more than one language declared
submission = parse_submission(data, self.samples['xform']['raw-xml-i18n'])
self.assertNotEqual(list(submission.keys())[0], 'Something_that_is_not_None', submission.keys())
self.assertEqual(submission, expected, json.dumps(submission, indent=2))
class XFormUtilsAvroTests(CustomTestCase):
def test__get_all_paths(self):
self.assertEqual(get_paths({}), [])
self.assertEqual(get_paths({'@a': 0}), [])
self.assertEqual(get_paths({'a': 0}), [('/a', False)])
self.assertEqual(get_paths({'a': {'b': 0}}), [('/a', True), ('/a/b', False)])
self.assertEqual(
get_paths({'a': {'@aether_default_visualization': 'pie', 'c': 0}}),
[('/a', True), ('/a/c', False)])
self.assertEqual(
get_paths({'a': {'b': 0, 'c': 0}}),
[('/a', True), ('/a/b', False), ('/a/c', False)])
def test__get_avro_type__not_required(self):
# avro types
self.assertEqual(get_type('boolean'), ['null', 'string'])
self.assertEqual(get_type('bytes'), ['null', 'string'])
self.assertEqual(get_type('double'), ['null', 'double'])
self.assertEqual(get_type('float'), ['null', 'float'])
self.assertEqual(get_type('int'), ['null', 'int'])
self.assertEqual(get_type('long'), ['null', 'long'])
self.assertEqual(get_type('string'), ['null', 'string'])
# xform specific types
self.assertEqual(get_type('binary'), ['null', 'string'])
self.assertEqual(get_type('date'), ['null', 'string'])
self.assertEqual(get_type('dateTime'), ['null', 'string'])
self.assertEqual(get_type('decimal'), ['null', 'double'])
self.assertEqual(get_type('integer'), ['null', 'int'])
self.assertEqual(get_type('select'), ['null', 'string'])
self.assertEqual(get_type('select1'), ['null', 'string'])
self.assertEqual(get_type('short'), ['null', 'int'])
# unknown
self.assertEqual(get_type('any-type'), ['null', 'string'])
def test__get_avro_type__required(self):
# avro types
self.assertEqual(get_type('boolean', True), 'string')
self.assertEqual(get_type('bytes', True), 'string')
self.assertEqual(get_type('double', True), 'double')
self.assertEqual(get_type('float', True), 'float')
self.assertEqual(get_type('int', True), 'int')
self.assertEqual(get_type('long', True), 'long')
self.assertEqual(get_type('string', True), 'string')
# xform specific types
self.assertEqual(get_type('binary', True), 'string')
self.assertEqual(get_type('date', True), 'string')
self.assertEqual(get_type('dateTime', True), 'string')
self.assertEqual(get_type('decimal', True), 'double')
self.assertEqual(get_type('integer', True), 'int')
self.assertEqual(get_type('select', True), 'string')
self.assertEqual(get_type('select1', True), 'string')
self.assertEqual(get_type('short', True), 'int')
# unknown
self.assertEqual(get_type('any-type', True), 'string')
def test__get_xform_instance__error(self):
with self.assertRaises(XFormParseError) as ve:
get_instance({})
self.assertIn('Missing required instance definition.', str(ve.exception), ve)
def test__get_xform_instance__error__no_instances(self):
with self.assertRaises(XFormParseError) as ve:
get_instance({
'h:html': {
'h:head': {
'model': {
'instance': {}
}
}
}
})
self.assertIn('Missing required instance definition.', str(ve.exception), ve)
def test__get_xform_instance__error___no_default_instance(self):
with self.assertRaises(XFormParseError) as ve:
get_instance({
'h:html': {
'h:head': {
'model': {
'instance': [
{'@id': 1},
{'@id': 2},
{'@id': 3},
]
}
}
}
})
self.assertIn('Missing required instance definition.', str(ve.exception), ve)
def test__get_xform_instance(self):
xform_dict = {
'h:html': {
'h:head': {
'model': {
'instance': [
{'@id': 1},
{'@id': 2},
{'root': {'content': 1}},
{'@id': 3},
]
}
}
}
}
self.assertEqual(get_instance(xform_dict, False), {'content': 1})
self.assertEqual(get_instance(xform_dict, True), {'root': {'content': 1}})
def test__get_xform_itexts__no_texts(self):
xform_dict = {'h:html': {'h:head': {'model': {}}}}
self.assertEqual(get_texts(xform_dict), {})
def test__get_xform_itexts__one_language(self):
xform_dict = {
'h:html': {
'h:head': {
'model': {
'itext': {
'translation': {
# this should always be there,
# but check that at least takes the first one
# '@default': 'true()',
'@lang': 'AA',
'text': {
'@id': 'a',
'value': 'A',
}
}
}
}
}
}
}
self.assertEqual(get_texts(xform_dict), {'a': 'A'})
def test__get_xform_itexts__multi_language(self):
xform_dict = {
'h:html': {
'h:head': {
'model': {
'itext': {
'translation': [
{
'@lang': 'AA',
'text': [
{
'@id': 'a',
'value': 'A',
},
{
'@id': 'b',
'value': 'B',
},
]
},
{
'@default': 'true()',
'@lang': 'BB',
'text': [
{
'@id': 'a',
# simple string
'value': 'B',
},
{
'@id': 'b',
# with embedded HTML
'value': '<span>C</span>',
},
{
'@id': 'c',
'value': [],
},
]
},
]
}
}
}
}
}
self.assertEqual(get_texts(xform_dict), {'a': 'B', 'b': 'C'})
def test__get_xform_itexts__several_values(self):
xform_dict = {
'h:html': {
'h:head': {
'model': {
'itext': {
'translation': [
{
'@default': 'true()',
'@lang': 'BB',
'text': [
{
'@id': 'a',
'value': [
{'@form': 'image', '#text': 'a'},
'B',
'C',
],
},
{
'@id': 'b',
'value': [
'E',
{'@form': 'image', '#text': 'b'},
],
},
]
},
]
}
}
}
}
}
self.assertEqual(get_texts(xform_dict), {'a': 'B', 'b': 'E'})
def test__get_xform_label__no_body(self):
xform_dict = {}
self.assertIsNone(get_label(xform_dict, '/None'))
xform_dict = {'h:html': {'h:body': None}}
self.assertIsNone(get_label(xform_dict, '/None/any'))
def test__get_xform_label__no_linked_label(self):
xform_dict = {
'h:html': {
'h:body': {
'any-tag': {
'@ref': '/None/any',
}
}
}
}
self.assertIsNone(get_label(xform_dict, '/None/any'))
def test__get_xform_label__blank_label(self):
xform_dict = {
'h:html': {
'h:body': {
'any-tag': {
'@ref': '/None/any',
'label': '',
}
}
}
}
self.assertIsNone(get_label(xform_dict, '/None/any'))
def test__get_xform_label__string_value(self):
xform_dict = {
'h:html': {
'h:body': | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Source repository: verypluming/SyGNS
"""
This script computes an F-score between two DRSs. It is based on and very similar to SMATCH.
For detailed description of smatch, see http://www.isi.edu/natural-language/drs/smatch-13.pdf
As opposed to AMRs, this script takes the clauses directly as input. Each clause should be on a
single line, with DRSs separated by a newline. Lines starting with '%' are ignored.
Command line options:
-f1 : First file with DRS clauses, usually produced file
-f2 : Second file with DRS clauses, usually gold file
-r : Number of restarts used (default 20)
-p : Number of parallel threads to use (default 1)
-mem : Memory limit per parallel thread (default 1G)
-s : What kind of smart initial mapping we use:
-no No smart mappings
-conc Smart mapping based on matching concepts (their match is likely to be in the optimal mapping)
-runs : Number of runs to average over, if you want a more reliable result (there is randomness involved in the initial restarts)
-prin : Print more specific output, such as individual (average) F-scores for the smart initial mappings, and the matching and non-matching clauses
-sig : Number of significant digits to output (default 4)
-b : Use this for baseline experiments, comparing a single DRS to a list of DRSs. Produced DRS file should contain a single DRS.
-ms : Instead of averaging the score, output a score for each DRS
-ms_file: Print the individual scores to a file (one score per line)
-nm : If added, do not print the clause mapping
-pa : Partial matching of clauses instead of full matching -- experimental setting!
Means that the clauses x1 work "n.01" x2 and x1 run "n.01" x2 are able to match for 0.75, for example
-st : Printing statistics regarding number of variables and clauses, don't do matching
-ic : Include REF clauses when matching (otherwise they are ignored because they inflate the matching)
-ds : Print detailed stats about individual clauses, parameter value is the minimum count of the clause to be included in the printing
-m : Max number of clauses for a DRS to still take them into account - default 0 means no limit
-g : Path of the file that contains the signature of clausal forms
-dse : Change all senses to a default sense
-dr : Change all roles to a default role
-dc : Change all concepts to a default concept
-ill : What to do with ill-formed DRSs. Throw an error (default), input dummy/SPAR DRS or try to output a score anyway (unofficial!)
-coda : For the CodaLab usage. Given a 'name', the script creates 'name.txt' and 'name.html' files
"""
import os
import random
import time
import argparse
import re
import sys
import multiprocessing
from multiprocessing import Pool
import json #reading in dict
try:
import cPickle as pickle
except ImportError:
import pickle
from numpy import median
try:
# only needed for Python 2
reload(sys)
sys.setdefaultencoding('utf-8') # necessary to avoid unicode errors
except:
pass
import psutil # for memory usage
# Imports for the hillclimbing algorithm
from hill_climbing import *
# Imports for format checking
from clf_referee import check_clf
from clf_referee import get_signature
# import html priting for codalab
from html_results import coda_html
# Import utils
from utils_counter import *
def build_arg_parser():
parser = argparse.ArgumentParser(description="Counter calculator -- arguments")
# Main arguments
parser.add_argument('-f1', required=True, type=str,
help='First file with DRS clauses, DRSs need to be separated by blank line')
parser.add_argument('-f2', required=True, type=str,
help='Second file with DRS clauses, DRSs need to be separated by blank line')
# Optimization (memory, speed, restarts, initial mappings)
parser.add_argument('-r', '--restarts', type=int,
default=20, help='Restart number (default: 20)')
parser.add_argument('-p', '--parallel', type=int, default=1,
help='Number of parallel threads we use (default 1)')
parser.add_argument('-mem', '--mem_limit', type=int, default=1000,
help='Memory limit in MBs (default 1000 -> 1G). Note that this is per parallel thread! If you use -par 4, each thread gets 1000 MB with default settings.')
parser.add_argument('-s', '--smart', default='conc', action='store', choices=[
'no', 'conc'], help='What kind of smart mapping do we use (default concepts)')
# Output settings (often not necessary to add or change), for example printing specific stats to a file, or to the screen
parser.add_argument('-prin', action='store_true',
help='Print very specific output - matching and non-matching clauses and specific F-scores for smart mappings')
parser.add_argument('-ms', action='store_true', default=False,
help='Output multiple scores (one pair per score) instead of a single document-level score (Default: false)')
parser.add_argument('-ms_file', default = '',
help='The file where we print the individual scores per DRS to -- one score per line (float) -- default empty means do not print to file')
parser.add_argument('-al', '--all_idv', action='store_true',
help='Add all idv information in the --ms_file file (match, prod, gold), not just the F-score')
parser.add_argument('-sig', '--significant', type=int,
default=4, help='significant digits to output (default: 4)')
parser.add_argument('-st', '--stats', default='', type=str,
help='If added this is the file we print a pickled dictionary to with statistics about number of clauses and variables.')
parser.add_argument('-ds', '--detailed_stats', type = int, default = 0,
help='If we add a value > 0 we print statistics about individual types of clauses that match or do not match, e.g. how well do we do on producing Theme, default 0 means do nothing')
parser.add_argument('-nm', '--no_mapping', action='store_true',
help='If added, do not print the mapping of variables in terms of clauses')
parser.add_argument('-g', '--signature', dest='sig_file', default = '',
help='If added, this contains a file with all allowed roles otherwise a simple signature is used that\
mainly recognizes operators based on their formatting')
parser.add_argument('-coda', '--codalab', default='',
help='a filename for which evaluation results for CodaLab are written in filename.txt and filename.html (default no writing)')
# Experiments with changing the input/output, or the matching algorithm
# If you add this the results will differ from the general F-score
parser.add_argument('-ill', default = 'error', choices =['error','dummy','spar', 'score'],
help='What to do when encountering an ill-formed DRS. Throw an error (default), input dummy or spar DRS, or give a score anyway (those scores are not official though!)')
parser.add_argument('-runs', type=int, default=1,
help='Usually we do 1 run, only for experiments we can increase the number of runs to get a better average')
parser.add_argument('-m', '--max_clauses', type=int, default=0,
help='Maximum number of clauses for DRS (default 0 means no limit)')
parser.add_argument('-b', '--baseline', action='store_true', default=False,
help="Helps in deciding a good baseline DRS. If added, prod-file must be a single DRS, gold file a number of DRSs to compare to (default false)")
parser.add_argument('-pa', '--partial', action='store_true',
help='Do partial matching for the DRSs (experimental!)')
parser.add_argument('-dse', '--default_sense', action='store_true',
help='Add a default sense for all word-senses (exclude effect of getting sense correct)')
parser.add_argument('-dr', '--default_role', action='store_true',
help='Add a default role for all role clauses (exclude effect of getting role correct)')
parser.add_argument('-dc', '--default_concept', action='store_true',
help='Add a default concept + sense for all concept clauses (exclude effect of getting concepts correct)')
parser.add_argument('-ic', '--include_ref', action='store_true',
help='Include REF clauses when matching -- will inflate the scores')
args = parser.parse_args()
# Check if files exist
if not os.path.exists(args.f1):
raise ValueError("File for -f1 does not exist")
if not os.path.exists(args.f2):
raise ValueError("File for -f2 does not exist")
# Check if combination of arguments is valid
if args.ms and args.runs > 1:
raise NotImplementedError("Not implemented to average over individual scores, only use -ms when doing a single run")
if args.restarts < 1:
raise ValueError('Number of restarts must be larger than 0')
if args.ms and args.parallel > 1:
print('WARNING: using -ms and -p > 1 messes up printing to screen - not recommended')
time.sleep(5) # so people can still read the warning
if args.ill in ['dummy', 'spar']:
print('WARNING: by using -ill {0}, ill-formed DRSs are replaced by a {0} DRS'.format(args.ill))
time.sleep(3)
elif args.ill == 'score':
print ('WARNING: ill-formed DRSs are given a score as if they were valid -- results in unofficial F-scores')
time.sleep(3)
if args.runs > 1 and args.prin:
print('WARNING: we do not print specific information (-prin) for runs > 1, only final averages')
time.sleep(5)
if args.partial:
raise NotImplementedError('Partial matching currently does not work')
return args
def remove_refs(clause_list, original_clauses):
    '''Remove unnecessary/redundant `b REF x` clauses; only keep them if x never occurs again in the same box'''
final_clauses, final_original = [], []
for tup_idx, tup_set in enumerate(clause_list):
cur_tup = []
cur_orig = []
for idx, spl_tup in enumerate(tup_set):
if len(spl_tup) < 3:
continue
elif spl_tup[1] == 'REF':
if not var_occurs(tup_set, spl_tup[2], spl_tup[0], idx): # only add if the variable does not occur afterwards
cur_tup.append(spl_tup)
cur_orig.append(original_clauses[tup_idx][idx])
else:
cur_tup.append(spl_tup)
cur_orig.append(original_clauses[tup_idx][idx])
final_clauses.append(cur_tup)
final_original.append(cur_orig)
return final_clauses, final_original
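# Illustrative example of what remove_refs keeps and drops (the clause values are made up):
# given the clauses ('b1', 'REF', 'x1') and ('b1', 'person', '"n.01"', 'x1') in one DRS,
# the REF clause is dropped because x1 occurs again after it; a REF whose variable never
# reappears in that box is kept so the box still introduces that referent.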
def get_clauses(file_name, signature, ill_type):
    '''Return a list of DRSs, each consisting of a list of clauses'''
clause_list, original_clauses, cur_orig, cur_clauses = [], [], [], []
with open(file_name, 'r') as in_f:
input_lines = in_f.read().split('\n')
for idx, line in enumerate(input_lines):
if line.strip().startswith('%'):
pass # skip comments
elif not line.strip():
if cur_clauses: # newline, so DRS is finished, add to list. Ignore double/clause newlines
# First check if the DRS is valid, will error if invalid
try:
                    # check_clf validation deactivated here
# check_clf([tuple(c) for c in cur_clauses], signature, v=False)
clause_list.append(cur_clauses)
original_clauses.append(cur_orig)
except Exception as e:
if ill_type == 'error':
raise | |
"""Common classes and functions."""
from datetime import tzinfo
from enum import Enum, IntEnum
from typing import (
Any,
Callable,
Dict,
NamedTuple,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import arrow
from arrow import Arrow
from dateutil import tz
from .const import (
STATUS_AUTH_FAILED,
STATUS_BAD_STATE,
STATUS_ERROR_OCCURRED,
STATUS_INVALID_PARAMS,
STATUS_SUCCESS,
STATUS_TIMEOUT,
STATUS_TOO_MANY_REQUESTS,
STATUS_UNAUTHORIZED,
)
class SleepModel(IntEnum):
"""Sleep model."""
TRACKER = 16
SLEEP_MONITOR = 32
def new_sleep_model(value: Optional[int]) -> SleepModel:
"""Create enum base on primitive."""
return cast(SleepModel, enum_or_raise(value, SleepModel))
class SleepState(IntEnum):
"""Sleep states."""
AWAKE = 0
LIGHT = 1
DEEP = 2
REM = 3
def new_sleep_state(value: Optional[int]) -> SleepState:
"""Create enum base on primitive."""
return cast(SleepState, enum_or_raise(value, SleepState))
class MeasureGetMeasGroupAttrib(IntEnum):
"""Measure group attributions."""
UNKNOWN = -1
DEVICE_ENTRY_FOR_USER = 0
DEVICE_ENTRY_FOR_USER_AMBIGUOUS = 1
MANUAL_USER_ENTRY = 2
MANUAL_USER_DURING_ACCOUNT_CREATION = 4
MEASURE_AUTO = 5
MEASURE_USER_CONFIRMED = 7
SAME_AS_DEVICE_ENTRY_FOR_USER = 8
def new_measure_group_attrib(value: Optional[int]) -> MeasureGetMeasGroupAttrib:
"""Create enum base on primitive."""
return cast(
MeasureGetMeasGroupAttrib, enum_or_raise(value, MeasureGetMeasGroupAttrib)
)
class MeasureGetMeasGroupCategory(IntEnum):
"""Measure categories."""
REAL = 1
USER_OBJECTIVES = 2
def new_measure_category(value: Optional[int]) -> MeasureGetMeasGroupCategory:
"""Create enum base on primitive."""
return cast(
MeasureGetMeasGroupCategory, enum_or_raise(value, MeasureGetMeasGroupCategory)
)
class MeasureType(IntEnum):
"""Measure types."""
WEIGHT = 1
HEIGHT = 4
FAT_FREE_MASS = 5
FAT_RATIO = 6
FAT_MASS_WEIGHT = 8
DIASTOLIC_BLOOD_PRESSURE = 9
SYSTOLIC_BLOOD_PRESSURE = 10
HEART_RATE = 11
TEMPERATURE = 12
SP02 = 54
BODY_TEMPERATURE = 71
SKIN_TEMPERATURE = 73
MUSCLE_MASS = 76
HYDRATION = 77
BONE_MASS = 88
PULSE_WAVE_VELOCITY = 91
def new_measure_type(value: Optional[int]) -> MeasureType:
"""Create enum base on primitive."""
return cast(MeasureType, enum_or_raise(value, MeasureType))
class NotifyAppli(IntEnum):
"""Data to notify_subscribe to."""
WEIGHT = 1
CIRCULATORY = 4
ACTIVITY = 16
SLEEP = 44
USER = 46
BED_IN = 50
BED_OUT = 51
def new_notify_appli(value: Optional[int]) -> NotifyAppli:
"""Create enum base on primitive."""
return cast(NotifyAppli, enum_or_raise(value, NotifyAppli))
class GetActivityField(Enum):
"""Fields for the getactivity api call."""
STEPS = "steps"
DISTANCE = "distance"
ELEVATION = "elevation"
SOFT = "soft"
MODERATE = "moderate"
INTENSE = "intense"
ACTIVE = "active"
CALORIES = "calories"
TOTAL_CALORIES = "totalcalories"
HR_AVERAGE = "hr_average"
HR_MIN = "hr_min"
HR_MAX = "hr_max"
HR_ZONE_0 = "hr_zone_0"
HR_ZONE_1 = "hr_zone_1"
HR_ZONE_2 = "hr_zone_2"
HR_ZONE_3 = "hr_zone_3"
class GetSleepField(Enum):
"""Fields for getsleep api call."""
HR = "hr"
RR = "rr"
class GetSleepSummaryField(Enum):
"""Fields for get sleep summary api call."""
REM_SLEEP_DURATION = "remsleepduration"
WAKEUP_DURATION = "wakeupduration"
LIGHT_SLEEP_DURATION = "lightsleepduration"
DEEP_SLEEP_DURATION = "deepsleepduration"
WAKEUP_COUNT = "wakeupcount"
DURATION_TO_SLEEP = "durationtosleep"
DURATION_TO_WAKEUP = "durationtowakeup"
HR_AVERAGE = "hr_average"
HR_MIN = "hr_min"
HR_MAX = "hr_max"
RR_AVERAGE = "rr_average"
RR_MIN = "rr_min"
RR_MAX = "rr_max"
class AuthScope(Enum):
"""Authorization scopes."""
USER_INFO = "user.info"
USER_METRICS = "user.metrics"
USER_ACTIVITY = "user.activity"
USER_SLEEP_EVENTS = "user.sleepevents"
UserGetDeviceDevice = NamedTuple(
"UserGetDeviceDevice",
(
("type", str),
("model", str),
("battery", str),
("deviceid", str),
("timezone", tzinfo),
),
)
UserGetDeviceResponse = NamedTuple(
"GetDeviceResponse", (("devices", Tuple[UserGetDeviceDevice, ...]),)
)
SleepGetTimestamp = NamedTuple("SleepGetTimestamp", [("timestamp", Arrow)])
SleepGetSerie = NamedTuple(
"SleepGetSerie",
(
("enddate", Arrow),
("startdate", Arrow),
("state", SleepState),
("hr", Optional[SleepGetTimestamp]),
("rr", Optional[SleepGetTimestamp]),
),
)
SleepGetResponse = NamedTuple(
"GetSleepResponse", (("model", SleepModel), ("series", Tuple[SleepGetSerie, ...]))
)
GetSleepSummaryData = NamedTuple(
"GetSleepSummaryData",
(
("remsleepduration", Optional[int]),
("wakeupduration", Optional[int]),
("lightsleepduration", Optional[int]),
("deepsleepduration", Optional[int]),
("wakeupcount", Optional[int]),
("durationtosleep", Optional[int]),
("durationtowakeup", Optional[int]),
("hr_average", Optional[int]),
("hr_min", Optional[int]),
("hr_max", Optional[int]),
("rr_average", Optional[int]),
("rr_min", Optional[int]),
("rr_max", Optional[int]),
),
)
GetSleepSummarySerie = NamedTuple(
"GetSleepSummarySerie",
(
("timezone", tzinfo),
("model", SleepModel),
("startdate", Arrow),
("enddate", Arrow),
("date", Arrow),
("modified", Arrow),
("data", GetSleepSummaryData),
),
)
SleepGetSummaryResponse = NamedTuple(
"GetSleepSummaryResponse",
(("more", bool), ("offset", int), ("series", Tuple[GetSleepSummarySerie, ...])),
)
MeasureGetMeasMeasure = NamedTuple(
"MeasureGetMeasMeasure", (("type", MeasureType), ("unit", int), ("value", int))
)
MeasureGetMeasGroup = NamedTuple(
"MeasureGetMeasGroup",
(
("attrib", MeasureGetMeasGroupAttrib),
("category", MeasureGetMeasGroupCategory),
("created", Arrow),
("date", Arrow),
("deviceid", Optional[str]),
("grpid", int),
("measures", Tuple[MeasureGetMeasMeasure, ...]),
),
)
MeasureGetMeasResponse = NamedTuple(
"GetMeasResponse",
(
("measuregrps", Tuple[MeasureGetMeasGroup, ...]),
("more", Optional[bool]),
("offset", Optional[int]),
("timezone", tzinfo),
("updatetime", Arrow),
),
)
MeasureGetActivityActivity = NamedTuple(
"MeasureGetActivityActivity",
(
("date", Arrow),
("timezone", tzinfo),
("deviceid", Optional[str]),
("brand", int),
("is_tracker", bool),
("steps", Optional[int]),
("distance", Optional[float]),
("elevation", Optional[float]),
("soft", Optional[int]),
("moderate", Optional[int]),
("intense", Optional[int]),
("active", Optional[int]),
("calories", Optional[float]),
("totalcalories", float),
("hr_average", Optional[int]),
("hr_min", Optional[int]),
("hr_max", Optional[int]),
("hr_zone_0", Optional[int]),
("hr_zone_1", Optional[int]),
("hr_zone_2", Optional[int]),
("hr_zone_3", Optional[int]),
),
)
MeasureGetActivityResponse = NamedTuple(
"GetActivityResponse",
(
("activities", Tuple[MeasureGetActivityActivity, ...]),
("more", bool),
("offset", int),
),
)
Credentials = NamedTuple(
"Credentials",
(
("access_token", str),
("token_expiry", int),
("token_type", str),
("refresh_token", str),
("userid", int),
("client_id", str),
("consumer_secret", str),
),
)
NotifyListProfile = NamedTuple(
"NotifyListProfile",
(
("appli", NotifyAppli),
("callbackurl", str),
("expires", Arrow),
("comment", Optional[str]),
),
)
NotifyListResponse = NamedTuple(
"NotifyListResponse", (("profiles", Tuple[NotifyListProfile, ...]),)
)
NotifyGetResponse = NamedTuple(
"NotifyGetResponse",
(("appli", NotifyAppli), ("callbackurl", str), ("comment", Optional[str])),
)
GenericType = TypeVar("GenericType")
class UnexpectedTypeException(Exception):
"""Thrown when encountering an unexpected type."""
def __init__(self, value: Any, expected: Type[GenericType]):
"""Initialize."""
super().__init__(
            'Expected "%s" to be of type "%s" but was "%s".' % (value, expected, type(value))
)
def enforce_type(value: Any, expected: Type[GenericType]) -> GenericType:
"""Enforce a data type."""
if not isinstance(value, expected):
raise UnexpectedTypeException(value, expected)
return value
def value_or_none(
value: Any, convert_fn: Callable[[Any], GenericType]
) -> Union[GenericType, None]:
"""Convert a value given a specific conversion function."""
if value is None:
return None
try:
return convert_fn(value)
except Exception: # pylint: disable=broad-except
return None
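# Illustrative behaviour of the helpers above (not part of the original module):
#   value_or_none("3", int)   -> 3
#   value_or_none(None, int)  -> None
#   value_or_none("abc", int) -> None   (the failed conversion is swallowed)
#   enforce_type("3", int)    -> raises UnexpectedTypeException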
def enum_or_raise(value: Optional[Union[str, int]], enum: Type[Enum]) -> Enum:
"""Return Enum or raise exception."""
if value is None:
raise Exception("Received None value for enum %s" % enum)
return enum(value)
def str_or_raise(value: Any) -> str:
"""Return string or raise exception."""
return enforce_type(str_or_none(value), str)
def str_or_none(value: Any) -> Optional[str]:
"""Return str or None."""
return value_or_none(value, str)
def bool_or_raise(value: Any) -> bool:
"""Return bool or raise exception."""
return enforce_type(value, bool)
def bool_or_none(value: Any) -> Optional[bool]:
"""Return bool or None."""
return value_or_none(value, bool)
def int_or_raise(value: Any) -> int:
"""Return int or raise exception."""
return enforce_type(int_or_none(value), int)
def int_or_none(value: Any) -> Optional[int]:
"""Return int or None."""
return value_or_none(value, int)
def float_or_raise(value: Any) -> float:
"""Return float or raise exception."""
return enforce_type(float_or_none(value), float)
def float_or_none(value: Any) -> Optional[float]:
"""Return float or None."""
return value_or_none(value, float)
def arrow_or_none(value: Any) -> Optional[Arrow]:
"""Returns Arrow or None."""
if value is None:
return None
return arrow.get(value)
def timezone_or_none(value: Any) -> Optional[tzinfo]:
"""Returns tzinfo or None."""
if value is None:
return None
return tz.gettz(value)
def arrow_or_raise(value: Any) -> Arrow:
"""Return Arrow or raise exception."""
return enforce_type(arrow_or_none(value), Arrow)
def timezone_or_raise(value: Any) -> tzinfo:
"""Return tzinfo or raise exception."""
return enforce_type(timezone_or_none(value), tzinfo)
def dict_or_raise(value: Any) -> Dict[Any, Any]:
"""Return dict or raise exception."""
return enforce_type(value, dict)
def dict_or_none(value: Any) -> Optional[Dict[Any, Any]]:
"""Return dict or None."""
return value_or_none(value, dict)
def new_credentials(
client_id: str, consumer_secret: str, data: Dict[str, Any]
) -> Credentials:
"""Create Credentials from config and json."""
return Credentials(
access_token=str_or_raise(data.get("access_token")),
        token_expiry=arrow.utcnow().timestamp + int_or_raise(data.get("expires_in")),
token_type=str_or_raise(data.get("token_type")),
refresh_token=str_or_raise(data.get("refresh_token")),
userid=int_or_raise(data.get("userid")),
client_id=str_or_raise(client_id),
consumer_secret=str_or_raise(consumer_secret),
)
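# Illustrative use of new_credentials (the token payload below is fabricated for the example):
#   creds = new_credentials(
#       client_id="my-client-id",
#       consumer_secret="my-consumer-secret",
#       data={
#           "access_token": "abc", "expires_in": 10800, "token_type": "Bearer",
#           "refresh_token": "def", "userid": 42,
#       },
#   )
#   # creds.token_expiry is the current UTC timestamp plus expires_in.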
def new_user_get_device_device(data: dict) -> UserGetDeviceDevice:
"""Create GetDeviceDevice from json."""
return UserGetDeviceDevice(
type=str_or_raise(data.get("type")),
model=str_or_raise(data.get("model")),
battery=str_or_raise(data.get("battery")),
deviceid=str_or_raise(data.get("deviceid")),
timezone=timezone_or_raise(data.get("timezone")),
)
def new_user_get_device_response(data: dict) -> UserGetDeviceResponse:
"""Create GetDeviceResponse from json."""
return UserGetDeviceResponse(
devices=tuple(
new_user_get_device_device(device) for device in data.get("devices", ())
)
)
def new_notify_list_profile(data: dict) -> NotifyListProfile:
"""Create ListSubscriptionProfile from json."""
return NotifyListProfile(
appli=new_notify_appli(data.get("appli")),
callbackurl=str_or_raise(data.get("callbackurl")),
expires=arrow_or_none(data.get("expires")),
comment=str_or_none(data.get("comment")),
)
def new_notify_list_response(data: dict) -> NotifyListResponse:
"""Create NotifyListResponse from json."""
return NotifyListResponse(
profiles=tuple(
new_notify_list_profile(profile) for profile in data.get("profiles", ())
)
)
def new_notify_get_response(data: dict) -> NotifyGetResponse:
"""Create NotifyGetResponse from json."""
return NotifyGetResponse(
appli=new_notify_appli(data.get("appli")),
callbackurl=str_or_raise(data.get("callbackurl")),
comment=str_or_none(data.get("comment")),
)
def new_sleep_timestamp(data: Optional[Dict[Any, Any]]) -> Optional[SleepGetTimestamp]:
"""Create SleepTimestamp from json."""
if data is None:
return data
return SleepGetTimestamp(arrow_or_raise(data.get("$timestamp")))
def new_sleep_get_serie(data: dict) -> SleepGetSerie:
"""Create GetSleepSerie from json."""
return SleepGetSerie(
enddate=arrow_or_raise(data.get("enddate")),
startdate=arrow_or_raise(data.get("startdate")),
state=new_sleep_state(data.get("state")),
hr=new_sleep_timestamp(dict_or_none(data.get("hr"))),
rr=new_sleep_timestamp(dict_or_none(data.get("rr"))),
)
def new_sleep_get_response(data: dict) -> SleepGetResponse:
"""Create GetSleepResponse from json."""
return SleepGetResponse(
model=new_sleep_model(data.get("model")),
series=tuple(new_sleep_get_serie(serie) for serie in data.get("series", ())),
)
def new_get_sleep_summary_data(data: dict) -> GetSleepSummaryData:
"""Create GetSleepSummarySerie from json."""
return GetSleepSummaryData(
remsleepduration=int_or_none(data.get("remsleepduration")),
wakeupduration=int_or_none(data.get("wakeupduration")),
lightsleepduration=int_or_none(data.get("lightsleepduration")),
deepsleepduration=int_or_none(data.get("deepsleepduration")),
wakeupcount=int_or_none(data.get("wakeupcount")),
durationtosleep=int_or_none(data.get("durationtosleep")),
durationtowakeup=int_or_none(data.get("durationtowakeup")),
hr_average=int_or_none(data.get("hr_average")),
hr_min=int_or_none(data.get("hr_min")),
hr_max=int_or_none(data.get("hr_max")),
rr_average=int_or_none(data.get("rr_average")),
rr_min=int_or_none(data.get("rr_min")),
rr_max=int_or_none(data.get("rr_max")),
)
def new_get_sleep_summary_serie(data: dict) -> GetSleepSummarySerie:
"""Create GetSleepSummarySerie from json."""
timezone = timezone_or_raise(data.get("timezone"))
return GetSleepSummarySerie(
date=arrow_or_raise(data.get("date")).replace(tzinfo=timezone),
enddate=arrow_or_raise(data.get("enddate")).replace(tzinfo=timezone),
model=new_sleep_model(data.get("model")),
modified=arrow_or_raise(data.get("modified")).replace(tzinfo=timezone),
startdate=arrow_or_raise(data.get("startdate")).replace(tzinfo=timezone),
timezone=timezone,
data=new_get_sleep_summary_data(dict_or_raise(data.get("data"))),
)
def new_sleep_get_summary_response(data: dict) -> SleepGetSummaryResponse:
"""Create GetSleepSummaryResponse from json."""
return SleepGetSummaryResponse(
more=bool_or_raise(data.get("more")),
offset=int_or_raise(data.get("offset")),
series=tuple(
new_get_sleep_summary_serie(serie) for serie in data.get("series", ())
),
)
def new_measure_get_meas_measure(data: dict) -> MeasureGetMeasMeasure:
"""Create GetMeasMeasure from json."""
return MeasureGetMeasMeasure(
value=int_or_raise(data.get("value")),
type=new_measure_type(data.get("type")),
unit=int_or_raise(data.get("unit")),
)
def new_measure_get_meas_group(data: dict, timezone: tzinfo) -> MeasureGetMeasGroup:
"""Create GetMeasGroup from json."""
return MeasureGetMeasGroup(
grpid=int_or_raise(data.get("grpid")),
attrib=new_measure_group_attrib(data.get("attrib")),
date=arrow_or_raise(data.get("date")).replace(tzinfo=timezone),
created=arrow_or_raise(data.get("created")).replace(tzinfo=timezone),
category=new_measure_category(data.get("category")),
deviceid=data.get("deviceid"),
measures=tuple(
new_measure_get_meas_measure(measure)
for measure in data.get("measures", ())
),
)
def new_measure_get_meas_response(data: dict) -> MeasureGetMeasResponse:
"""Create GetMeasResponse from json."""
timezone = timezone_or_raise(data.get("timezone"))
return MeasureGetMeasResponse(
measuregrps=tuple(
new_measure_get_meas_group(group, timezone)
for group in data.get("measuregrps", ())
),
more=data.get("more"),
offset=data.get("offset"),
timezone=timezone,
updatetime=arrow_or_raise(data.get("updatetime")).replace(tzinfo=timezone),
)
def new_measure_get_activity_activity(data: dict) -> MeasureGetActivityActivity:
"""Create GetActivityActivity from json."""
timezone = timezone_or_raise(data.get("timezone"))
return MeasureGetActivityActivity(
date=arrow_or_raise(data.get("date")).replace(tzinfo=timezone),
timezone=timezone,
deviceid=str_or_none(data.get("deviceid")),
brand=int_or_raise(data.get("brand")),
is_tracker=bool_or_raise(data.get("is_tracker")),
steps=int_or_none(data.get("steps")),
distance=float_or_raise(data.get("distance")),
elevation=float_or_raise(data.get("elevation")),
soft=int_or_none(data.get("soft")),
moderate=int_or_none(data.get("moderate")),
intense=int_or_none(data.get("intense")),
active=int_or_none(data.get("active")),
calories=float_or_raise(data.get("calories")),
totalcalories=float_or_raise(data.get("totalcalories")),
hr_average=int_or_none(data.get("hr_average")),
hr_min=int_or_none(data.get("hr_min")),
hr_max=int_or_none(data.get("hr_max")),
hr_zone_0=int_or_none(data.get("hr_zone_0")),
hr_zone_1=int_or_none(data.get("hr_zone_1")),
hr_zone_2=int_or_none(data.get("hr_zone_2")),
hr_zone_3=int_or_none(data.get("hr_zone_3")),
)
def new_measure_get_activity_response(data: dict) -> MeasureGetActivityResponse:
"""Create GetActivityResponse from json."""
return MeasureGetActivityResponse(
activities=tuple(
new_measure_get_activity_activity(activity)
for activity in data.get("activities", | |
# File: sdk/python/pulumi_alicloud/clickhouse/db_cluster.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DbClusterArgs', 'DbCluster']
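# Illustrative Pulumi program using this resource (the resource name, vswitch ID and sizes
# below are placeholders, not values taken from this module):
#
#   import pulumi_alicloud as alicloud
#
#   cluster = alicloud.clickhouse.DbCluster(
#       "example",
#       category="Basic",
#       db_cluster_class="S8",
#       db_cluster_network_type="vpc",
#       db_cluster_version="...",          # pick one of the supported versions
#       db_node_group_count=1,
#       db_node_storage="100",
#       payment_type="PayAsYouGo",
#       storage_type="cloud_essd",
#       vswitch_id="vsw-xxxxxxxxxxxxxxxx",
#   )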
@pulumi.input_type
class DbClusterArgs:
def __init__(__self__, *,
category: pulumi.Input[str],
db_cluster_class: pulumi.Input[str],
db_cluster_network_type: pulumi.Input[str],
db_cluster_version: pulumi.Input[str],
db_node_group_count: pulumi.Input[int],
db_node_storage: pulumi.Input[str],
payment_type: pulumi.Input[str],
storage_type: pulumi.Input[str],
db_cluster_description: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
maintain_time: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
used_time: Optional[pulumi.Input[str]] = None,
vswitch_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DbCluster resource.
:param pulumi.Input[str] category: The Category of DBCluster. Valid values: `Basic`,`HighAvailability`.
:param pulumi.Input[str] db_cluster_class: The DBCluster class. According to the category, db_cluster_class has two value ranges:
               * When the category is `Basic`, valid values are: `S4-NEW`, `S8`, `S16`, `S32`, `S64`, `S104`.
               * When the category is `HighAvailability`, valid values are: `C4-NEW`, `C8`, `C16`, `C32`, `C64`, `C104`.
:param pulumi.Input[str] db_cluster_network_type: The DBCluster network type. Valid values: `vpc`.
:param pulumi.Input[str] db_cluster_version: The DBCluster version. Valid values: `192.168.127.12`, `192.168.127.12`, `172.16.17.32`.
        :param pulumi.Input[int] db_node_group_count: The db node group count. The number should be between 1 and 48.
:param pulumi.Input[str] db_node_storage: The db node storage.
:param pulumi.Input[str] payment_type: The payment type of the resource. Valid values: `PayAsYouGo`,`Subscription`.
:param pulumi.Input[str] storage_type: Storage type of DBCluster. Valid values: `cloud_essd`, `cloud_efficiency`, `cloud_essd_pl2`, `cloud_essd_pl3`.
:param pulumi.Input[str] db_cluster_description: The DBCluster description.
:param pulumi.Input[str] encryption_key: Key management service KMS key ID.
        :param pulumi.Input[str] encryption_type: Currently only ECS disk encryption is supported. Set this to CloudDisk to enable it; when empty, the cluster is not encrypted.
        :param pulumi.Input[str] maintain_time: The maintenance window of DBCluster. Valid format: `hh:mmZ-hh:mmZ`.
        :param pulumi.Input[str] period: The billing cycle of a prepaid (`Subscription`) cluster. Valid values: `Month`, `Year`.
        :param pulumi.Input[str] status: The status of the resource. Valid values: `Running`, `Creating`, `Deleting`, `Restarting`, `Preparing`.
:param pulumi.Input[str] used_time: The used time of DBCluster.
:param pulumi.Input[str] vswitch_id: The vswitch id of DBCluster.
"""
pulumi.set(__self__, "category", category)
pulumi.set(__self__, "db_cluster_class", db_cluster_class)
pulumi.set(__self__, "db_cluster_network_type", db_cluster_network_type)
pulumi.set(__self__, "db_cluster_version", db_cluster_version)
pulumi.set(__self__, "db_node_group_count", db_node_group_count)
pulumi.set(__self__, "db_node_storage", db_node_storage)
pulumi.set(__self__, "payment_type", payment_type)
pulumi.set(__self__, "storage_type", storage_type)
if db_cluster_description is not None:
pulumi.set(__self__, "db_cluster_description", db_cluster_description)
if encryption_key is not None:
pulumi.set(__self__, "encryption_key", encryption_key)
if encryption_type is not None:
pulumi.set(__self__, "encryption_type", encryption_type)
if maintain_time is not None:
pulumi.set(__self__, "maintain_time", maintain_time)
if period is not None:
pulumi.set(__self__, "period", period)
if status is not None:
pulumi.set(__self__, "status", status)
if used_time is not None:
pulumi.set(__self__, "used_time", used_time)
if vswitch_id is not None:
pulumi.set(__self__, "vswitch_id", vswitch_id)
@property
@pulumi.getter
def category(self) -> pulumi.Input[str]:
"""
The Category of DBCluster. Valid values: `Basic`,`HighAvailability`.
"""
return pulumi.get(self, "category")
@category.setter
def category(self, value: pulumi.Input[str]):
pulumi.set(self, "category", value)
@property
@pulumi.getter(name="dbClusterClass")
def db_cluster_class(self) -> pulumi.Input[str]:
"""
The DBCluster class. According to the category, db_cluster_class has two value ranges:
        * When the category is `Basic`, valid values are: `S4-NEW`, `S8`, `S16`, `S32`, `S64`, `S104`.
        * When the category is `HighAvailability`, valid values are: `C4-NEW`, `C8`, `C16`, `C32`, `C64`, `C104`.
"""
return pulumi.get(self, "db_cluster_class")
@db_cluster_class.setter
def db_cluster_class(self, value: pulumi.Input[str]):
pulumi.set(self, "db_cluster_class", value)
@property
@pulumi.getter(name="dbClusterNetworkType")
def db_cluster_network_type(self) -> pulumi.Input[str]:
"""
The DBCluster network type. Valid values: `vpc`.
"""
return pulumi.get(self, "db_cluster_network_type")
@db_cluster_network_type.setter
def db_cluster_network_type(self, value: pulumi.Input[str]):
pulumi.set(self, "db_cluster_network_type", value)
@property
@pulumi.getter(name="dbClusterVersion")
def db_cluster_version(self) -> pulumi.Input[str]:
"""
The DBCluster version. Valid values: `192.168.127.12`, `192.168.127.12`, `172.16.17.32`.
"""
return pulumi.get(self, "db_cluster_version")
@db_cluster_version.setter
def db_cluster_version(self, value: pulumi.Input[str]):
pulumi.set(self, "db_cluster_version", value)
@property
@pulumi.getter(name="dbNodeGroupCount")
def db_node_group_count(self) -> pulumi.Input[int]:
"""
        The db node group count. The number should be between 1 and 48.
"""
return pulumi.get(self, "db_node_group_count")
@db_node_group_count.setter
def db_node_group_count(self, value: pulumi.Input[int]):
pulumi.set(self, "db_node_group_count", value)
@property
@pulumi.getter(name="dbNodeStorage")
def db_node_storage(self) -> pulumi.Input[str]:
"""
The db node storage.
"""
return pulumi.get(self, "db_node_storage")
@db_node_storage.setter
def db_node_storage(self, value: pulumi.Input[str]):
pulumi.set(self, "db_node_storage", value)
@property
@pulumi.getter(name="paymentType")
def payment_type(self) -> pulumi.Input[str]:
"""
The payment type of the resource. Valid values: `PayAsYouGo`,`Subscription`.
"""
return pulumi.get(self, "payment_type")
@payment_type.setter
def payment_type(self, value: pulumi.Input[str]):
pulumi.set(self, "payment_type", value)
@property
@pulumi.getter(name="storageType")
def storage_type(self) -> pulumi.Input[str]:
"""
Storage type of DBCluster. Valid values: `cloud_essd`, `cloud_efficiency`, `cloud_essd_pl2`, `cloud_essd_pl3`.
"""
return pulumi.get(self, "storage_type")
@storage_type.setter
def storage_type(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_type", value)
@property
@pulumi.getter(name="dbClusterDescription")
def db_cluster_description(self) -> Optional[pulumi.Input[str]]:
"""
The DBCluster description.
"""
return pulumi.get(self, "db_cluster_description")
@db_cluster_description.setter
def db_cluster_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "db_cluster_description", value)
@property
@pulumi.getter(name="encryptionKey")
def encryption_key(self) -> Optional[pulumi.Input[str]]:
"""
Key management service KMS key ID.
"""
return pulumi.get(self, "encryption_key")
@encryption_key.setter
def encryption_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption_key", value)
@property
@pulumi.getter(name="encryptionType")
def encryption_type(self) -> Optional[pulumi.Input[str]]:
"""
        Currently only ECS disk encryption is supported. Set this to CloudDisk to enable it; when empty, the cluster is not encrypted.
"""
return pulumi.get(self, "encryption_type")
@encryption_type.setter
def encryption_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption_type", value)
@property
@pulumi.getter(name="maintainTime")
def maintain_time(self) -> Optional[pulumi.Input[str]]:
"""
        The maintenance window of DBCluster. Valid format: `hh:mmZ-hh:mmZ`.
"""
return pulumi.get(self, "maintain_time")
@maintain_time.setter
def maintain_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "maintain_time", value)
@property
@pulumi.getter
def period(self) -> Optional[pulumi.Input[str]]:
"""
        The billing cycle of a prepaid (`Subscription`) cluster. Valid values: `Month`, `Year`.
"""
return pulumi.get(self, "period")
@period.setter
def period(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "period", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
        The status of the resource. Valid values: `Running`, `Creating`, `Deleting`, `Restarting`, `Preparing`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="usedTime")
def used_time(self) -> Optional[pulumi.Input[str]]:
"""
The used time of DBCluster.
"""
return pulumi.get(self, "used_time")
@used_time.setter
def used_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "used_time", value)
@property
@pulumi.getter(name="vswitchId")
def vswitch_id(self) -> Optional[pulumi.Input[str]]:
"""
The vswitch id of DBCluster.
"""
return pulumi.get(self, "vswitch_id")
@vswitch_id.setter
def vswitch_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vswitch_id", value)
@pulumi.input_type
class _DbClusterState:
def __init__(__self__, *,
category: Optional[pulumi.Input[str]] = None,
db_cluster_class: Optional[pulumi.Input[str]] = None,
db_cluster_description: Optional[pulumi.Input[str]] = None,
db_cluster_network_type: Optional[pulumi.Input[str]] = None,
db_cluster_version: Optional[pulumi.Input[str]] = None,
db_node_group_count: Optional[pulumi.Input[int]] = None,
db_node_storage: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
maintain_time: Optional[pulumi.Input[str]] = None,
payment_type: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
storage_type: Optional[pulumi.Input[str]] = None,
used_time: Optional[pulumi.Input[str]] = None,
vswitch_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DbCluster resources.
:param pulumi.Input[str] category: The Category of DBCluster. Valid values: `Basic`,`HighAvailability`.
:param pulumi.Input[str] db_cluster_class: The DBCluster class. According to the category, db_cluster_class has two value ranges:
               * When the category is `Basic`, valid values are: `S4-NEW`, `S8`, `S16`, `S32`, `S64`, `S104`.
               * When the category is `HighAvailability`, valid values are: `C4-NEW`, `C8`, `C16`, `C32`, `C64`, `C104`.
:param pulumi.Input[str] db_cluster_description: The DBCluster description.
:param pulumi.Input[str] db_cluster_network_type: The DBCluster network type. Valid values: `vpc`.
:param pulumi.Input[str] db_cluster_version: The DBCluster version. Valid values: `192.168.127.12`, `192.168.127.12`, `172.16.17.32`.
        :param pulumi.Input[int] db_node_group_count: The db node group count. The number should be between 1 and 48.
:param pulumi.Input[str] db_node_storage: The db node storage.
:param pulumi.Input[str] encryption_key: Key management service KMS key ID.
        :param pulumi.Input[str] encryption_type: Currently only ECS disk encryption is supported. Set this to CloudDisk to enable it; when empty, the cluster is not encrypted.
        :param pulumi.Input[str] maintain_time: The maintenance window of DBCluster. Valid format: `hh:mmZ-hh:mmZ`.
        :param pulumi.Input[str] payment_type: The payment type of the resource. Valid values: `PayAsYouGo`, `Subscription`.
        :param pulumi.Input[str] period: The billing cycle of a prepaid (`Subscription`) cluster. Valid values: `Month`, `Year`.
        :param pulumi.Input[str] status: The status of the resource. Valid values: `Running`, `Creating`, `Deleting`, `Restarting`, `Preparing`.
:param pulumi.Input[str] storage_type: Storage type of DBCluster. Valid values: `cloud_essd`, `cloud_efficiency`, `cloud_essd_pl2`, `cloud_essd_pl3`.
:param pulumi.Input[str] used_time: The used time of DBCluster.
:param pulumi.Input[str] vswitch_id: The vswitch id of DBCluster.
"""
if category is not None:
pulumi.set(__self__, "category", category)
if db_cluster_class is not None:
pulumi.set(__self__, "db_cluster_class", db_cluster_class)
if db_cluster_description is not None:
pulumi.set(__self__, "db_cluster_description", db_cluster_description)
if db_cluster_network_type is not None:
pulumi.set(__self__, "db_cluster_network_type", db_cluster_network_type)
if db_cluster_version is not None:
pulumi.set(__self__, "db_cluster_version", db_cluster_version)
if db_node_group_count is not None:
pulumi.set(__self__, "db_node_group_count", db_node_group_count)
if db_node_storage is not None:
pulumi.set(__self__, "db_node_storage", db_node_storage)
if encryption_key is not None:
pulumi.set(__self__, "encryption_key", encryption_key)
if encryption_type is not None:
pulumi.set(__self__, "encryption_type", encryption_type)
if maintain_time is not None:
pulumi.set(__self__, "maintain_time", maintain_time)
if payment_type is not None:
pulumi.set(__self__, "payment_type", payment_type)
if period is not None:
pulumi.set(__self__, "period", period)
if status is not None:
pulumi.set(__self__, | |
    weighted = w is not None
nbin = edges.shape[0]+1
if weighted:
count = zeros(nbin, dtype=w.dtype)
if normed:
count = zeros(nbin, dtype=float)
w = w/w.mean()
else:
count = zeros(nbin, int)
binindex = digitize(a, edges)
# Count the number of identical indices.
flatcount = bincount(binindex, w)
# Place the count in the histogram array.
count[:len(flatcount)] = flatcount
return count
def _optimize_binning(x, range, method='Freedman'):
"""Find the optimal number of bins.
Available methods : Freedman, Scott
"""
N = x.shape[0]
if method.lower()=='freedman':
s=sort(x)
        IQR = s[int(N*.75)] - s[int(N*.25)] # Interquartile range (75% - 25%)
width = 2* IQR*N**(-1./3)
elif method.lower()=='scott':
width = 3.49 * x.std()* N**(-1./3)
else:
raise ValueError('Method must be Scott or Freedman', method)
return int(diff(range)/width)
def normcdf(x, log=False):
"""Normal cumulative density function."""
y = np.atleast_1d(x).copy()
flib.normcdf(y)
if log:
# return np.where(y>0, np.log(y), -np.inf)
return np.array([-np.inf if not yi else np.log(yi) for yi in y])
return y
def lognormcdf(x, mu, tau):
"""Log-normal cumulative density function"""
x = np.atleast_1d(x)
return np.array([0.5*(1-flib.derf(-(np.sqrt(tau/2))*(np.log(y)-mu))) for y in x])
def invcdf(x):
"""Inverse of normal cumulative density function."""
x = np.atleast_1d(x)
return np.array([flib.ppnd16(y,1) for y in x])
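# Illustrative sanity check (the values are standard normal quantiles, stated from theory
# rather than produced by running this module):
#   invcdf(0.975)  is approximately 1.96
#   normcdf(1.96)  is approximately 0.975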
def ar1_gen(rho, mu, sigma, size=1):
"""Create an autoregressive series of order one AR(1) generator.
.. math::
        X_t = \mu_t + \rho (X_{t-1}-\mu_{t-1}) + \epsilon_t
If mu is a sequence and size > len(mu), the algorithm loops through
mu.
:Stochastics:
rho : scalar in [0,1]
mu : scalar or sequence
sigma : scalar > 0
size : integer
"""
mu = np.asarray(mu, float)
mu = np.resize(mu, size)
r = mu.copy()
r += np.random.randn(size)*sigma
r[0] = np.random.randn(1)*sigma/np.sqrt(1-rho**2)
i = 0
while True:
yield r[i]
i+=1
if i==size:
break
r[i] += rho*(r[i-1]-mu[i-1])
def ar1(rho, mu, sigma, size=1):
"""Return an autoregressive series of order one AR(1).
.. math::
        X_t = \mu_t + \rho (X_{t-1}-\mu_{t-1}) + \epsilon_t
If mu is a sequence and size > len(mu), the algorithm loops through
mu.
:Stochastics:
rho : scalar in [0,1]
mu : scalar or sequence
sigma : scalar > 0
size : integer
"""
return np.array([x for x in ar1_gen(rho, mu, sigma, size)])
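# Illustrative sketch (not part of the original module): drawing a short AR(1)
# series with the helper above. The parameter values are only for demonstration.
def _example_ar1():
    # 500 draws around a constant mean of 1.0 with lag-one correlation 0.7
    return ar1(0.7, 1.0, 0.5, size=500)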
def autocorr(x, lag=1):
"""Sample autocorrelation at specified lag.
The autocorrelation is the correlation of x_i with x_{i+lag}.
"""
if not lag: return 1
if lag<0: return
# x = np.squeeze(asarray(x))
# mu = x.mean()
# v = x.var()
# return ((x[:-lag]-mu)*(x[lag:]-mu)).sum()/v/(len(x) - lag)
S = autocov(x, lag)
return S[0,1]/sqrt(prod(diag(S)))
def autocov(x, lag=1):
"""
Sample autocovariance at specified lag.
The autocovariance is a 2x2 matrix with the variances of
x[:-lag] and x[lag:] in the diagonal and the autocovariance
on the off-diagonal.
"""
if not lag: return 1
if lag<0: return
return cov(x[:-lag], x[lag:], bias=1)
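# Illustrative sketch (not part of the original module): the lag-1 sample
# autocorrelation of an AR(1) series should come out close to the rho used to
# generate it (0.7 here), while autocov() returns the full 2x2 covariance
# matrix of (x[:-1], x[1:]).
def _example_autocorr():
    x = ar1(0.7, 0.0, 1.0, size=2000)
    return autocorr(x, lag=1), autocov(x, lag=1)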
def trace_generator(trace, start=0, stop=None, step=1):
"""Return a generator returning values from the object's trace.
Ex:
T = trace_generator(theta.trace)
T.next()
for t in T:...
"""
i = start
stop = stop or np.inf
size = min(trace.length(), stop)
while i < size:
index = slice(i, i+1)
yield trace.gettrace(slicing=index)[0]
i+=step
def draw_random(obj, **kwds):
"""Draw random variates from obj.random method.
If the object has parents whose value must be updated, use
parent_name=trace_generator_function.
Ex:
R = draw_random(theta, beta=pymc.utils.trace_generator(beta.trace))
R.next()
"""
while True:
for k,v in six.iteritems(kwds):
obj.parents[k] = v.next()
yield obj.random()
def rec_getattr(obj, attr):
"""Get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> rec_getattr(a, 'b.c')
4
"""
return reduce(getattr, attr.split('.'), obj)
def rec_setattr(obj, attr, value):
"""Set object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> rec_setattr(a, 'b.c', 2)
>>> a.b.c
2
"""
attrs = attr.split('.')
setattr(reduce(getattr, attrs[:-1], obj), attrs[-1], value)
def hpd(x, alpha):
"""Calculate HPD (minimum width BCI) of array for given alpha
:Arguments:
x : Numpy array
An array containing MCMC samples
alpha : float
Desired probability of type I error
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim>1:
# Transpose first, then sort
        tx = tr(x, list(range(x.ndim))[1:] + [0])
dims = shape(tx)
# Container list for intervals
intervals = np.resize(0.0, dims[:-1]+(2,))
for index in make_indices(dims[:-1]):
try:
index = tuple(index)
except TypeError:
pass
# Sort trace
sx = sort(tx[index])
# Append to list
intervals[index] = calc_min_interval(sx, alpha)
# Transpose back before returning
return array(intervals)
else:
# Sort univariate node
sx = sort(x)
return array(calc_min_interval(sx, alpha))
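# Illustrative sketch (not part of the original module): for a standard normal
# sample, hpd(x, 0.05) returns the narrowest interval containing 95% of the
# draws, which should land near (-1.96, 1.96).
def _example_hpd():
    from numpy.random import randn
    samples = randn(10000)
    return hpd(samples, 0.05)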
def make_indices(dimensions):
# Generates complete set of indices for given dimensions
level = len(dimensions)
if level==1: return range(dimensions[0])
indices = [[]]
while level:
_indices = []
for j in range(dimensions[level-1]):
_indices += [[j]+i for i in indices]
indices = _indices
level -= 1
try:
return [tuple(i) for i in indices]
except TypeError:
return indices
def calc_min_interval(x, alpha):
"""Internal method to determine the minimum interval of
a given width"""
# Initialize interval
min_int = [None,None]
try:
# Number of elements in trace
n = len(x)
# Start at far left
start, end = 0, int(n*(1-alpha))
# Initialize minimum width to large value
min_width = inf
while end < n:
# Endpoints of interval
hi, lo = x[end], x[start]
# Width of interval
width = hi - lo
# Check to see if width is narrower than minimum
if width < min_width:
min_width = width
min_int = [lo, hi]
# Increment endpoints
start +=1
end += 1
return min_int
except IndexError:
print_('Too few elements for interval calculation')
return [None,None]
def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5)):
"""Returns a dictionary of requested quantiles from array
:Arguments:
x : Numpy array
An array containing MCMC samples
qlist : tuple or list
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim>1:
# Transpose first, then sort, then transpose back
sx = sort(x.T).T
else:
# Sort univariate node
sx = sort(x)
try:
# Generate specified quantiles
quants = [sx[int(len(sx)*q/100.0)] for q in qlist]
return dict(zip(qlist, quants))
except IndexError:
print_("Too few elements for quantile calculation")
def coda_output(pymc_object, name=None, chain=-1):
"""Generate output files that are compatible with CODA
:Arguments:
pymc_object : Model or Node
A PyMC object containing MCMC output.
"""
print_()
print_("Generating CODA output")
print_('='*50)
if name is None:
name = pymc_object.__name__
# Open trace file
trace_file = open(name+'_coda.out', 'w')
# Open index file
index_file = open(name+'_coda.ind', 'w')
variables = [pymc_object]
if hasattr(pymc_object, 'stochastics'):
variables = pymc_object.stochastics
# Initialize index
index = 1
# Loop over all parameters
for v in variables:
vname = v.__name__
print_("Processing", vname)
try:
index = _process_trace(trace_file, index_file, v.trace(chain=chain), vname, index)
except TypeError:
pass
# Close files
trace_file.close()
index_file.close()
# Lazy shortcut
coda = coda_output
def _process_trace(trace_file, index_file, trace, name, index):
"""Support function for coda_output(); writes output to files"""
if ndim(trace)>1:
trace = swapaxes(trace, 0, 1)
for i, seq in enumerate(trace):
_name = '%s_%s' % (name, i)
index = _process_trace(trace_file, index_file, seq, _name, index)
else:
index_buffer = '%s\t%s\t' % (name, index)
for i, val in enumerate(trace):
trace_file.write('%s\t%s\r\n' % (i+1, val))
index += 1
index_file.write('%s%s\r\n' % (index_buffer, index-1))
return index
def log_difference(lx, ly):
"""Returns log(exp(lx) - exp(ly)) without leaving log space."""
# Negative log of double-precision infinity
li=-709.78271289338397
diff = ly - lx
# Make sure log-difference can succeed
if np.any(diff>=0):
raise ValueError('Cannot compute log(x-y), because y>=x for some elements.')
# Otherwise evaluate log-difference
return lx + np.log(1.-np.exp(diff))
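# Illustrative sketch (not part of the original module): log_difference relies
# on the identity log(exp(lx) - exp(ly)) = lx + log(1 - exp(ly - lx)), which is
# exactly what the return statement above evaluates. With lx = log(3) and
# ly = log(1) the result is log(2).
def _example_log_difference():
    return log_difference(np.log(3.0), np.log(1.0))  # ~= np.log(2.0)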
def getInput():
"""Read the input buffer without blocking the system."""
input = ''
if sys.platform=='win32':
import msvcrt
if msvcrt.kbhit(): # Check for a keyboard hit.
input += msvcrt.getch()
print_(input)
else:
time.sleep(.1)
else: # Other platforms
# Posix will work with sys.stdin or sys.stdin.fileno()
# Mac needs the file descriptor.
# This solution does not work for windows since select
# expects a socket, and I have no idea how to create a
# socket from standard input.
sock = sys.stdin.fileno()
#select(rlist, wlist, xlist, timeout)
while len(select.select([sock], [], [], 0.1)[0])>0:
input += os.read(sock, 4096)
return input
def crawl_dataless(sofar, gens):
"""
Crawls out from v to find the biggest dataless submodel containing v.
TODO: Let MCMC start the crawl from its last generation. It doesn't
#!/usr/bin/env python
#
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
"""Download the Swedish Traffic Signs dataset and create the Speed Limit Signs
dataset from and train with attention sampling.
NOTE: Swedish Traffic Signs dataset is provided from
https://www.cvl.isy.liu.se/research/datasets/traffic-signs-dataset/ .
"""
import argparse
from collections import namedtuple
from functools import partial
import hashlib
import urllib.request
import os
from os import path
import string
import sys
import zipfile
from cv2 import imread, imwrite
from keras.callbacks import Callback, ModelCheckpoint, LearningRateScheduler
from keras.layers import Activation, BatchNormalization, Conv2D, \
GlobalAveragePooling2D, MaxPooling2D, Dense, Input, add
from keras.models import Model
from keras.optimizers import SGD, Adam
from keras.regularizers import l2
from keras.utils import Sequence, plot_model
import numpy as np
from ats.core import attention_sampling
from ats.utils.layers import L2Normalize, ResizeImages, SampleSoftmax, \
ImageLinearTransform, ImagePan
from ats.utils.regularizers import multinomial_entropy
from ats.utils.training import Batcher
def check_file(filepath, md5sum):
"""Check a file against an md5 hash value.
Returns
-------
True if the file exists and has the given md5 sum False otherwise
"""
try:
md5 = hashlib.md5()
with open(filepath, "rb") as f:
for chunk in iter(partial(f.read, 4096), b""):
md5.update(chunk)
return md5.hexdigest() == md5sum
except FileNotFoundError:
return False
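# Illustrative sketch (not part of this script): check_file streams the file in
# 4 KiB chunks through hashlib.md5 and compares the hex digest. The path below
# is a placeholder; the digest shown is the md5 of empty input, so this returns
# False unless such a file actually exists with that content.
def _example_check_file():
    return check_file("/tmp/example.bin",
                      "d41d8cd98f00b204e9800998ecf8427e")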
def download_file(url, destination, progress_file=sys.stderr):
"""Download a file with progress."""
response = urllib.request.urlopen(url)
n_bytes = response.headers.get("Content-Length")
if n_bytes == "":
n_bytes = 0
else:
n_bytes = int(n_bytes)
message = "\rReceived {} / {}"
cnt = 0
with open(destination, "wb") as dst:
while True:
print(message.format(cnt, n_bytes), file=progress_file,
end="", flush=True)
data = response.read(65535)
if len(data) == 0:
break
dst.write(data)
cnt += len(data)
print(file=progress_file)
def ensure_dataset_exists(directory, tries=1, progress_file=sys.stderr):
"""Ensure that the dataset is downloaded and is correct.
Correctness is checked only against the annotations files.
"""
set1_url = ("http://www.isy.liu.se/cvl/research/trafficSigns"
"/swedishSignsSummer/Set1/Set1Part0.zip")
set1_annotations_url = ("http://www.isy.liu.se/cvl/research/trafficSigns"
"/swedishSignsSummer/Set1/annotations.txt")
set1_annotations_md5 = "9106a905a86209c95dc9b51d12f520d6"
set2_url = ("http://www.isy.liu.se/cvl/research/trafficSigns"
"/swedishSignsSummer/Set2/Set2Part0.zip")
set2_annotations_url = ("http://www.isy.liu.se/cvl/research/trafficSigns"
"/swedishSignsSummer/Set2/annotations.txt")
set2_annotations_md5 = "09debbc67f6cd89c1e2a2688ad1d03ca"
integrity = (
check_file(
path.join(directory, "Set1", "annotations.txt"),
set1_annotations_md5
) and check_file(
path.join(directory, "Set2", "annotations.txt"),
set2_annotations_md5
)
)
if integrity:
return
if tries <= 0:
raise RuntimeError(("Cannot download dataset or dataset download "
"is corrupted"))
print("Downloading Set1", file=progress_file)
download_file(set1_url, path.join(directory, "Set1.zip"),
progress_file=progress_file)
print("Extracting...", file=progress_file)
with zipfile.ZipFile(path.join(directory, "Set1.zip")) as archive:
archive.extractall(path.join(directory, "Set1"))
print("Getting annotation file", file=progress_file)
download_file(
set1_annotations_url,
path.join(directory, "Set1", "annotations.txt"),
progress_file=progress_file
)
print("Downloading Set2", file=progress_file)
download_file(set2_url, path.join(directory, "Set2.zip"),
progress_file=progress_file)
print("Extracting...", file=progress_file)
with zipfile.ZipFile(path.join(directory, "Set2.zip")) as archive:
archive.extractall(path.join(directory, "Set2"))
print("Getting annotation file", file=progress_file)
download_file(
set2_annotations_url,
path.join(directory, "Set2", "annotations.txt"),
progress_file=progress_file
)
return ensure_dataset_exists(
directory,
tries=tries-1,
progress_file=progress_file
)
class Sign(namedtuple("Sign", ["visibility", "bbox", "type", "name"])):
"""A sign object. Useful for making ground truth images as well as making
the dataset."""
@property
def x_min(self):
return self.bbox[2]
@property
def x_max(self):
return self.bbox[0]
@property
def y_min(self):
return self.bbox[3]
@property
def y_max(self):
return self.bbox[1]
@property
def area(self):
return (self.x_max - self.x_min) * (self.y_max - self.y_min)
@property
def center(self):
return [
(self.y_max - self.y_min)/2 + self.y_min,
(self.x_max - self.x_min)/2 + self.x_min
]
@property
def visibility_index(self):
visibilities = ["VISIBLE", "BLURRED", "SIDE_ROAD", "OCCLUDED"]
return visibilities.index(self.visibility)
def pixels(self, scale, size):
return zip(*(
(i, j)
for i in range(round(self.y_min*scale), round(self.y_max*scale)+1)
for j in range(round(self.x_min*scale), round(self.x_max*scale)+1)
if i < round(size[0]*scale) and j < round(size[1]*scale)
))
def __lt__(self, other):
if not isinstance(other, Sign):
raise ValueError("Signs can only be compared to signs")
if self.visibility_index != other.visibility_index:
return self.visibility_index < other.visibility_index
return self.area > other.area
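# Illustrative sketch (not part of this script): Sign ordering prefers the most
# visible sign first and, within the same visibility, the larger bounding box.
# All field values below are made up for illustration; note the
# (x_max, y_max, x_min, y_min) bbox layout implied by the properties above.
def _example_sign_ordering():
    big = Sign(visibility="VISIBLE", bbox=[100.0, 50.0, 60.0, 20.0],
               type="INFORMATION", name="50_SIGN")
    blurred = Sign(visibility="BLURRED", bbox=[100.0, 50.0, 60.0, 20.0],
                   type="INFORMATION", name="70_SIGN")
    return sorted([blurred, big])  # the VISIBLE sign sorts first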
class STS:
"""The STS class reads the annotations and creates the corresponding
Sign objects."""
def __init__(self, directory, train=True, seed=0):
ensure_dataset_exists(directory)
self._directory = directory
self._inner = "Set{}".format(1 + ((seed + 1 + int(train)) % 2))
self._data = self._load_signs(self._directory, self._inner)
def _load_files(self, directory, inner):
files = set()
with open(path.join(directory, inner, "annotations.txt")) as f:
for l in f:
files.add(l.split(":", 1)[0])
return sorted(files)
def _read_bbox(self, parts):
def _float(x):
try:
return float(x)
except ValueError:
if len(x) > 0:
return _float(x[:-1])
raise
return [_float(x) for x in parts]
def _load_signs(self, directory, inner):
with open(path.join(directory, inner, "annotations.txt")) as f:
lines = [l.strip() for l in f]
keys, values = zip(*(l.split(":", 1) for l in lines))
all_signs = []
for v in values:
signs = []
for sign in v.split(";"):
if sign == [""] or sign == "":
continue
parts = [s.strip() for s in sign.split(",")]
if parts[0] == "MISC_SIGNS":
continue
signs.append(Sign(
visibility=parts[0],
bbox=self._read_bbox(parts[1:5]),
type=parts[5],
name=parts[6]
))
all_signs.append(signs)
images = [path.join(directory, inner, f) for f in keys]
return list(zip(images, all_signs))
def __len__(self):
return len(self._data)
def __getitem__(self, i):
return self._data[i]
class SpeedLimits(Sequence):
"""Provide a Keras Sequence for the SpeedLimits dataset which is basically
a filtered version of the STS dataset.
Arguments
---------
directory: str, The directory that the dataset already is or is going
to be downloaded in
train: bool, Select the training or testing sets
seed: int, The prng seed for the dataset
"""
LIMITS = ["50_SIGN", "70_SIGN", "80_SIGN"]
CLASSES = ["EMPTY", *LIMITS]
def __init__(self, directory, train=True, seed=0):
self._data = self._filter(STS(directory, train, seed))
def _filter(self, data):
filtered = []
for image, signs in data:
signs, acceptable = self._acceptable(signs)
if acceptable:
if not signs:
filtered.append((image, 0))
else:
filtered.append((image, self.CLASSES.index(signs[0].name)))
return filtered
def _acceptable(self, signs):
# Keep it as empty
if not signs:
return signs, True
# Filter just the speed limits and sort them wrt visibility
signs = sorted(s for s in signs if s.name in self.LIMITS)
# No speed limit but many other signs
if not signs:
return None, False
# Not visible sign so skip
if signs[0].visibility != "VISIBLE":
return None, False
return signs, True
def __len__(self):
return len(self._data)
def __getitem__(self, i):
image, category = self._data[i]
data = imread(image)
data = data.astype(np.float32) / np.float32(255.)
label = np.eye(len(self.CLASSES), dtype=np.float32)[category]
return data, label
@property
def image_size(self):
return self[0][0].shape[:2]
@property
def class_frequencies(self):
"""Compute and return the class specific frequencies."""
freqs = np.zeros(len(self.CLASSES), dtype=np.float32)
for image, category in self._data:
freqs[category] += 1
return freqs/len(self._data)
def strided(self, N):
"""Extract N images almost in equal proportions from each category."""
order = np.arange(len(self._data))
np.random.shuffle(order)
idxs = []
cat = 0
while len(idxs) < N:
for i in order:
image, category = self._data[i]
if cat == category:
idxs.append(i)
cat = (cat + 1) % len(self.CLASSES)
if len(idxs) >= N:
break
return idxs
class AttentionSaver(Callback):
"""Save the attention maps to monitor model evolution."""
def __init__(self, output_directory, att_model, training_set):
self._dir = path.join(output_directory, "attention")
try:
os.mkdir(self._dir)
except FileExistsError:
pass
self._att_model = att_model
idxs = training_set.strided(10)
data = [training_set[i] for i in idxs]
self._X = np.array([d[0] for d in data])
self._Y = np.array([d[1] for d in data]).argmax(axis=1)
np.savetxt(
path.join(self._dir, "points.txt"),
np.array([[i, yi] for i, yi in zip(idxs, self._Y)]).astype(int),
fmt="%d"
)
def on_train_begin(self, *args):
_, _, x_low = self._att_model.predict(self._X)
for i, xi in enumerate(x_low):
self._imsave(path.join(self._dir, "{}.jpg").format(i), xi)
def on_epoch_end(self, e, logs):
att, patches, _ = self._att_model.predict(self._X)
for i, att_i in enumerate(att):
np.save(path.join(self._dir, "att_{}_{}.npy").format(e, i), att_i)
def _imsave(self, filepath, x):
x = (x*255).astype(np.uint8)
imwrite(filepath, x)
def resnet(x, strides=[1, 2, 2, 2], filters=[32, 32, 32, 32]):
"""Implement a simple resnet."""
# Do a convolution on x
def c(x, filters, kernel, strides):
return Conv2D(filters, kernel_size=kernel, strides=strides,
padding="same", use_bias=False)(x)
# Do a BatchNorm on x
def b(x):
return BatchNormalization()(x)
    # Obviously just do relu
def relu(x):
return Activation("relu")(x)
# Implement a resnet block. short is True when we need to add a convolution
# for the shortcut
def block(x, filters, strides, short):
x = b(x)
x = relu(x)
x_short = x
if short:
x_short = c(x, filters, 1, strides)
x = c(x, filters, 3, strides)
x = b(x)
x = relu(x)
x = c(x, filters, 3, 1)
x = add([x, x_short])
return x
    # Implement the resnet (copy the lists so the mutable defaults are not consumed by pop)
    strides = list(strides)
    filters = list(filters)
    stride_prev = strides.pop(0)
    filters_prev = filters.pop(0)
y = c(x, filters_prev, 3, stride_prev)
for s, f in zip(strides, filters):
y = block(y, f, s, s != 1 or f != filters_prev)
stride_prev = s
filters_prev = f
y = b(y)
y = relu(y)
# Average the final features and normalize them
y = GlobalAveragePooling2D()(y)
y = L2Normalize()(y)
return y
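# Illustrative sketch (not part of this script): resnet() above is a feature
# extractor, so a standalone classifier can be built by attaching a Dense
# softmax head to its L2-normalized features. The input size and class count
# below are placeholders, not values used elsewhere in this script.
def _example_resnet_classifier():
    x_in = Input(shape=(64, 64, 3))
    features = resnet(x_in)
    y_out = Dense(4, activation="softmax")(features)
    return Model(inputs=x_in, outputs=y_out)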
def attention(x):
params = dict(
activation="relu",
padding="valid",
kernel_regularizer=l2(1e-5)
)
x = Conv2D(8, kernel_size=3, **params)(x)
x = Conv2D(16, kernel_size=3, **params)(x)
x = Conv2D(32, kernel_size=3, **params)(x)
x = Conv2D(1, kernel_size=3)(x)
x = MaxPooling2D(pool_size=8)(x)
x = SampleSoftmax(squeeze_channels=True, smooth=1e-4)(x)
return x
def get_model(outputs, width, height, scale, n_patches, patch_size, reg):
x_in = Input(shape=(height, width, 3))
x_high = ImageLinearTransform()(x_in)
x_high = ImagePan(horizontally=True, vertically=True)(x_high)
x_low = ResizeImages((int(height*scale), int(width*scale)))(x_high)
features, att, patches = attention_sampling(
attention,
resnet,
HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v2_show(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool show_deleted: If true, shows the soft deleted thing
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v2_show_with_http_info(id, **kwargs) # noqa: E501
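    # Illustrative usage sketch (not part of the generated client). The class
    # name, client object and thing id below are placeholders:
    #
    #   api = ThingsV2Api(api_client)
    #   thing = api.things_v2_show("my-thing-id", show_deleted=False)
    #   # or, asynchronously:
    #   thread = api.things_v2_show("my-thing-id", async_req=True)
    #   thing = thread.get()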
def things_v2_show_with_http_info(self, id, **kwargs): # noqa: E501
"""show things_v2 # noqa: E501
Returns the thing requested by the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v2_show_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool show_deleted: If true, shows the soft deleted thing
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'show_deleted'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v2_show" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `things_v2_show`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'show_deleted' in local_var_params and local_var_params['show_deleted'] is not None: # noqa: E501
query_params.append(('show_deleted', local_var_params['show_deleted'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v2/things/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v2_update(self, id, thing, **kwargs): # noqa: E501
"""update things_v2 # noqa: E501
        Updates a thing associated with the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v2_update(id, thing, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param Thing thing: ThingPayload describes a thing (required)
:param bool force: If true, detach device from the other thing, and attach to this thing
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v2_update_with_http_info(id, thing, **kwargs) # noqa: E501
def things_v2_update_with_http_info(self, id, thing, **kwargs): # noqa: E501
"""update things_v2 # noqa: E501
        Updates a thing associated with the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v2_update_with_http_info(id, thing, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param Thing thing: ThingPayload describes a thing (required)
:param bool force: If true, detach device from the other thing, and attach to this thing
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'thing',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v2_update" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `things_v2_update`") # noqa: E501
# verify the required parameter 'thing' is set
if self.api_client.client_side_validation and ('thing' not in local_var_params or # noqa: E501
local_var_params['thing'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `thing` when calling `things_v2_update`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'thing' in local_var_params:
body_params = local_var_params['thing']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v2/things/{id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v2_update_sketch(self, id, sketch_id, **kwargs): # noqa: E501
"""updateSketch things_v2 # noqa: E501
Update an existing thing sketch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v2_update_sketch(id, sketch_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param str sketch_id: The id of the sketch (required)
:param UpdateSketch update_sketch:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v2_update_sketch_with_http_info(id, sketch_id, **kwargs) # noqa: E501
def things_v2_update_sketch_with_http_info(self, id, sketch_id, **kwargs): # noqa: E501
"""updateSketch things_v2 # noqa: E501
Update an existing thing sketch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v2_update_sketch_with_http_info(id, sketch_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param str sketch_id: The id of the sketch (required)
:param UpdateSketch update_sketch:
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
from __future__ import division, absolute_import, print_function
from past.builtins import xrange
import numpy as np
import esutil
import time
import matplotlib.pyplot as plt
from .fgcmUtilities import objFlagDict
from .fgcmUtilities import obsFlagDict
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
class FgcmStars(object):
"""
Class to describe the stars and observations of the stars. Note that
after initialization you must call loadStarsFromFits() or loadStars()
to load the star information. This allows an external caller to clear
out memory after it has been copied to the shared memory buffers.
parameters
----------
fgcmConfig: FgcmConfig
Config variables
----------------
minObsPerBand: int
        Minimum number of observations per band to be "good"
sedFitBandFudgeFactors: float array
Fudge factors for computing fnuprime for the fit bands
sedExtraBandFudgeFactors: float array
Fudge factors for computing fnuprime for the extra bands
starColorCuts: list
List that contains lists of [bandIndex0, bandIndex1, minColor, maxColor]
sigma0Phot: float
Floor on photometric error to add to every observation
reserveFraction: float
Fraction of stars to hold in reserve
mapLongitudeRef: float
Reference longitude for plotting maps of stars
mapNSide: int
Healpix nside of map plotting.
superStarSubCCD: bool
Use sub-ccd info to make superstar flats?
obsFile: string, only if using fits mode
Star observation file
indexFile: string, only if using fits mode
Star index file
"""
def __init__(self,fgcmConfig):
self.fgcmLog = fgcmConfig.fgcmLog
self.fgcmLog.info('Initializing stars.')
self.obsFile = fgcmConfig.obsFile
self.indexFile = fgcmConfig.indexFile
self.bands = fgcmConfig.bands
self.nBands = len(fgcmConfig.bands)
self.nCCD = fgcmConfig.nCCD
self.minObsPerBand = fgcmConfig.minObsPerBand
self.fitBands = fgcmConfig.fitBands
self.nFitBands = len(fgcmConfig.fitBands)
self.extraBands = fgcmConfig.extraBands
self.sedFitBandFudgeFactors = fgcmConfig.sedFitBandFudgeFactors
self.sedExtraBandFudgeFactors = fgcmConfig.sedExtraBandFudgeFactors
self.starColorCuts = fgcmConfig.starColorCuts
self.sigma0Phot = fgcmConfig.sigma0Phot
self.ccdStartIndex = fgcmConfig.ccdStartIndex
self.plotPath = fgcmConfig.plotPath
self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
self.expField = fgcmConfig.expField
self.ccdField = fgcmConfig.ccdField
self.reserveFraction = fgcmConfig.reserveFraction
self.modelMagErrors = fgcmConfig.modelMagErrors
self.inFlagStarFile = fgcmConfig.inFlagStarFile
self.mapLongitudeRef = fgcmConfig.mapLongitudeRef
self.mapNSide = fgcmConfig.mapNSide
self.lambdaStdBand = fgcmConfig.lambdaStdBand
self.bandRequiredFlag = fgcmConfig.bandRequiredFlag
self.bandRequiredIndex = np.where(self.bandRequiredFlag)[0]
self.bandExtraFlag = fgcmConfig.bandExtraFlag
self.bandExtraIndex = np.where(self.bandExtraFlag)[0]
self.lutFilterNames = fgcmConfig.lutFilterNames
self.filterToBand = fgcmConfig.filterToBand
self.superStarSubCCD = fgcmConfig.superStarSubCCD
#self.expArray = fgcmPars.expArray
#self._loadStars(fgcmPars)
self.magStdComputed = False
self.allMagStdComputed = False
self.sedSlopeComputed = False
#if (computeNobs):
# allExps = np.arange(fgcmConfig.expRange[0],fgcmConfig.expRange[1],dtype='i4')
# self.fgcmLog.info('Checking stars with full possible range of exp numbers')
#self.selectStarsMinObs(goodExps=allExps,doPlots=False)
# allExpsIndex = np.arange(fgcmPars.expArray.size)
# self.selectStarsMinObsExpIndex(allExpsIndex)
self.magConstant = 2.5/np.log(10)
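        # Note (editorial sketch, not part of the original module): magConstant
        # is the Pogson factor 2.5 / ln(10) ~= 1.0857, which converts a
        # fractional flux error df/f into a magnitude error via
        # dm ~= (2.5 / ln 10) * (df / f).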
self.hasXY = False
def loadStarsFromFits(self,fgcmPars,computeNobs=True):
"""
Load stars from fits files.
parameters
----------
fgcmPars: FgcmParameters
computeNobs: bool, default=True
Compute number of observations of each star/band
Config variables
----------------
indexFile: string
Star index file
obsFile: string
Star observation file
inFlagStarFile: string, optional
Flagged star file
"""
import fitsio
# read in the observation indices...
startTime = time.time()
self.fgcmLog.info('Reading in observation indices...')
index = fitsio.read(self.indexFile, ext='INDEX')
self.fgcmLog.info('Done reading in %d observation indices in %.1f seconds.' %
(index.size, time.time() - startTime))
# read in obsfile and cut
startTime = time.time()
self.fgcmLog.info('Reading in star observations...')
obs = fitsio.read(self.obsFile, ext=1)
# cut down to those that are indexed
obs = obs[index['OBSINDEX']]
self.fgcmLog.info('Done reading in %d observations in %.1f seconds.' %
(obs.size, time.time() - startTime))
# and positions...
startTime = time.time()
self.fgcmLog.info('Reading in star positions...')
pos = fitsio.read(self.indexFile, ext='POS')
        self.fgcmLog.info('Done reading in %d unique star positions in %.1f seconds.' %
(pos.size, time.time() - startTime))
#obsBand = np.core.defchararray.strip(obs['BAND'][:])
obsFilterName = np.core.defchararray.strip(obs['FILTERNAME'][:])
if (self.inFlagStarFile is not None):
self.fgcmLog.info('Reading in list of previous flagged stars from %s' %
(self.inFlagStarFile))
inFlagStars = fitsio.read(self.inFlagStarFile, ext=1)
flagID = inFlagStars['OBJID']
flagFlag = inFlagStars['OBJFLAG']
else:
flagID = None
flagFlag = None
        # FIXME: add support for x/y from fits files
if ('X' in obs.dtype.names and 'Y' in obs.dtype.names):
self.fgcmLog.info('Found X/Y in input observations')
obsX = obs['X']
obsY = obs['Y']
else:
obsX = None
obsY = None
# process
self.loadStars(fgcmPars,
obs[self.expField],
obs[self.ccdField],
obs['RA'],
obs['DEC'],
obs['MAG'],
obs['MAGERR'],
obsFilterName,
pos['FGCM_ID'],
pos['RA'],
pos['DEC'],
pos['OBSARRINDEX'],
pos['NOBS'],
obsX=obsX,
obsY=obsY,
flagID=flagID,
flagFlag=flagFlag,
computeNobs=computeNobs)
# and clear memory
index = None
obs = None
pos = None
def loadStars(self, fgcmPars,
obsExp, obsCCD, obsRA, obsDec, obsMag, obsMagErr, obsFilterName,
objID, objRA, objDec, objObsIndex, objNobs, obsX=None, obsY=None,
flagID=None, flagFlag=None, computeNobs=True):
"""
Load stars from arrays
parameters
----------
fgcmPars: fgcmParameters
obsExp: int array
Exposure number (or equivalent) for each observation
obsCCD: int array
CCD number (or equivalent) for each observation
obsRA: double array
RA for each observation (degrees)
obsDec: double array
Dec for each observation (degrees)
obsMag: float array
Raw ADU magnitude for each observation
obsMagErr: float array
Raw ADU magnitude error for each observation
obsFilterName: string array
Filter name for each observation
objID: int array
Unique ID number for each object
objRA: double array
RA for each object (degrees)
objDec: double array
Dec for each object (degrees)
objObsIndex: int array
For each object, where in the obs table to look
objNobs: int array
number of observations of this object (all bands)
obsX: float array, optional
x position for each observation
obsY: float array, optional
y position for each observation
flagID: int array, optional
ID of each object that is flagged from previous cycle
flagFlag: int array, optional
Flag value from previous cycle
computeNobs: bool, default=True
Compute number of good observations of each object?
"""
# FIXME: check that these are all the same length!
self.obsIndexHandle = snmm.createArray(obsRA.size, dtype='i4')
snmm.getArray(self.obsIndexHandle)[:] = np.arange(obsRA.size)
# need to stuff into shared memory objects.
        # nStarObs: total number of observations of all stars
self.nStarObs = obsRA.size
# obsExp: exposure number of individual observation (pointed by obsIndex)
self.obsExpHandle = snmm.createArray(self.nStarObs,dtype='i4')
# obsExpIndex: exposure index
self.obsExpIndexHandle = snmm.createArray(self.nStarObs,dtype='i4')
# obsCCD: ccd number of individual observation
self.obsCCDHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsBandIndex: band index of individual observation
self.obsBandIndexHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsLUTFilterIndex: filter index in LUT of individual observation
self.obsLUTFilterIndexHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsFlag: individual bad observation
self.obsFlagHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsRA: RA of individual observation
self.obsRAHandle = snmm.createArray(self.nStarObs,dtype='f8')
# obsDec: Declination of individual observation
self.obsDecHandle = snmm.createArray(self.nStarObs,dtype='f8')
# obsSecZenith: secant(zenith) of individual observation
self.obsSecZenithHandle = snmm.createArray(self.nStarObs,dtype='f8')
# obsMagADU: log raw ADU counts of individual observation
## FIXME: need to know default zeropoint?
self.obsMagADUHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsMagADUErr: raw ADU counts error of individual observation
self.obsMagADUErrHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsMagADUModelErr: modeled ADU counts error of individual observation
self.obsMagADUModelErrHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsSuperStarApplied: SuperStar correction that was applied
self.obsSuperStarAppliedHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsMagStd: corrected (to standard passband) mag of individual observation
self.obsMagStdHandle = snmm.createArray(self.nStarObs,dtype='f4',syncAccess=True)
if (obsX is not None and obsY is not None):
self.hasXY = True
# obsX: x position on the CCD of the given observation
self.obsXHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsY: y position on the CCD of the given observation
self.obsYHandle = snmm.createArray(self.nStarObs,dtype='f4')
else:
# hasXY = False
if self.superStarSubCCD:
raise ValueError("Input stars do not have x/y but superStarSubCCD is set.")
snmm.getArray(self.obsExpHandle)[:] = obsExp
snmm.getArray(self.obsCCDHandle)[:] = obsCCD
snmm.getArray(self.obsRAHandle)[:] = obsRA
snmm.getArray(self.obsDecHandle)[:] = obsDec
snmm.getArray(self.obsMagADUHandle)[:] = obsMag
snmm.getArray(self.obsMagADUErrHandle)[:] = obsMagErr
snmm.getArray(self.obsMagStdHandle)[:] = obsMag # same as raw at first
snmm.getArray(self.obsSuperStarAppliedHandle)[:] = 0.0
if self.hasXY:
snmm.getArray(self.obsXHandle)[:] = obsX
snmm.getArray(self.obsYHandle)[:] = obsY
self.fgcmLog.info('Applying sigma0Phot = %.4f to mag errs' %
(self.sigma0Phot))
obsMagADUErr = snmm.getArray(self.obsMagADUErrHandle)
obsFlag = snmm.getArray(self.obsFlagHandle)
bad, = np.where(obsMagADUErr <= 0.0)
obsFlag[bad] |= obsFlagDict['BAD_ERROR']
if (bad.size > 0):
self.fgcmLog.info('Flagging %d observations with bad errors.' %
(bad.size))
obsMagADUErr[:] = np.sqrt(obsMagADUErr[:]**2. + self.sigma0Phot**2.)
# Initially, we set the model error to the observed error
obsMagADUModelErr = snmm.getArray(self.obsMagADUModelErrHandle)
obsMagADUModelErr[:] = obsMagADUErr[:]
startTime = time.time()
self.fgcmLog.info('Matching observations to exposure table.')
obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
obsExpIndex[:] = -1
a,b=esutil.numpy_util.match(fgcmPars.expArray,
snmm.getArray(self.obsExpHandle)[:])
obsExpIndex[b] = a
self.fgcmLog.info('Observations matched in %.1f seconds.' %
(time.time() - startTime))
bad, = np.where(obsExpIndex < 0)
obsFlag[bad] |= obsFlagDict['NO_EXPOSURE']
if (bad.size > 0):
self.fgcmLog.info('Flagging %d observations with no associated exposure.' %
(bad.size))
# match bands and filters to indices
startTime = time.time()
self.fgcmLog.info('Matching observations to bands.')
#for i in xrange(self.nBands):
# use, = np.where(obsBand == self.bands[i])
# if (use.size == 0):
# raise ValueError("No observations in band %s!" % (self.bands[i]))
# snmm.getArray(self.obsBandIndexHandle)[use] = i
# new version for multifilter support
# First, we have the filterNames
for filterIndex,filterName in enumerate(self.lutFilterNames):
#try:
# bandIndex, = np.where(self.filterToBand[filterName] == self.bands)
#except:
# self.fgcmLog.info('WARNING: observations with filter %s not in config' % (filterName))
# bandIndex = -1
try:
bandIndex = self.bands.index(self.filterToBand[filterName])
except:
self.fgcmLog.info('WARNING: observations with filter %s not in config' % (filterName))
bandIndex = -1
            # obsFilterName is an array from fits/numpy.
# desihub/desisurvey
"""Manage static information associated with tiles, programs and passes.
Each tile has an assigned program name. The program names
(DARK, BRIGHT) are predefined in terms of conditions on the
ephemerides, but not all programs need to be present in a tiles file.
Pass numbers are arbitrary integers and do not need to be consecutive or dense.
To ensure consistent and efficient usage of static tile info, all code
should use::
tiles = desisurvey.tiles.get_tiles()
To use a non-standard tiles file, change the configuration before the
first call to ``get_tiles()`` with::
config = desisurvey.config.Configuration()
config.tiles_file.set_value(name)
The :class:`Tiles` class returned by :func:`get_tiles` is a wrapper around
the FITS table contained in a tiles file, that adds some precomputed derived
attributes for consistency and efficiency.
"""
from __future__ import print_function, division
import os
import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.table import Table
import desimodel.io
import desiutil.log
import desisurvey.config
import desisurvey.utils
import desisurvey.etc
class Tiles(object):
"""Manage static info associated with the tiles file.
Parameters
----------
tile_file : str or None
Name of the tiles file to use or None for the default specified
in our configuration.
"""
def __init__(self, tiles_file=None):
config = desisurvey.config.Configuration()
self.nogray = config.tiles_nogray()
bright_allowed_in_dark = getattr(
config, 'bright_allowed_in_dark', None)
if bright_allowed_in_dark is not None:
self.bright_allowed_in_dark = bright_allowed_in_dark()
else:
self.bright_allowed_in_dark = False
# Read the specified tiles file.
self.tiles_file = tiles_file or config.tiles_file()
self.tiles_file = find_tile_file(self.tiles_file)
tiles = self.read_tiles_table()
# Copy tile arrays.
self.tileID = tiles['TILEID'].data.copy()
self.tileRA = tiles['RA'].data.copy()
self.tileDEC = tiles['DEC'].data.copy()
self.tileprogram = np.array([p.strip() for p in tiles['PROGRAM']])
self.tilepass = tiles['PASS'].data.copy()
self.designha = None
if 'DESIGNHA' in tiles.dtype.names:
self.designha = tiles['DESIGNHA'].data.copy()
self.priority_boostfac = np.ones(len(tiles), dtype='f4')
if 'PRIORITY_BOOSTFAC' in tiles.dtype.names:
self.priority_boostfac = tiles['PRIORITY_BOOSTFAC']
self.tileobsconditions = self.get_conditions()
if self.nogray:
mgray = self.tileobsconditions == 'GRAY'
self.tileobsconditions[mgray] = 'DARK'
self.in_desi = tiles['IN_DESI'].data.copy() != 0
# Count tiles.
self.ntiles = len(self.tileID)
# Can remove this when tile_index no longer uses searchsorted.
if not np.all(np.diff(self.tileID) > 0):
raise RuntimeError('Tile IDs are not increasing.')
self.programs = [x for x in np.unique(tiles['PROGRAM'].data)]
self.program_index = {pname: pidx
for pidx, pname in enumerate(self.programs)}
        # Build tile masks for each program. A program with no tiles will have an empty mask.
self.program_mask = {}
for p in self.programs:
self.program_mask[p] = (self.tileprogram == p) & self.in_desi
# Calculate and save dust exposure factors.
self.dust_factor = desisurvey.etc.dust_exposure_factor(tiles['EBV_MED'].data)
# Precompute coefficients to calculate tile observing airmass.
latitude = np.radians(config.location.latitude())
tile_dec_rad = np.radians(self.tileDEC)
self.tile_coef_A = np.sin(tile_dec_rad) * np.sin(latitude)
self.tile_coef_B = np.cos(tile_dec_rad) * np.cos(latitude)
# Placeholders for overlap attributes that are expensive to calculate
# so we use lazy evaluation the first time they are accessed.
self._overlapping = None
self._neighbors = None
self._fiberassign_delay = None
# Calculate the maximum |HA| in degrees allowed for each tile to stay
# above the survey minimum altitude
cosZ_min = np.cos(90 * u.deg - config.min_altitude())
cosHA_min = (
(cosZ_min - np.sin(self.tileDEC * u.deg) * np.sin(latitude)) /
(np.cos(self.tileDEC * u.deg) * np.cos(latitude))).value
cosHA_min = np.clip(cosHA_min, -1, 1)
self.max_abs_ha = np.degrees(np.arccos(cosHA_min))
m = ~np.isfinite(self.max_abs_ha) | (self.max_abs_ha < 3.75)
self.max_abs_ha[m] = 7.5 # always give at least a half hour window.
CONDITIONS = ['DARK', 'GRAY', 'BRIGHT']
CONDITION_INDEX = {cond: i for i, cond in enumerate(CONDITIONS)}
def airmass(self, hour_angle, mask=None):
"""Calculate tile airmass given hour angle.
Parameters
----------
hour_angle : array
Array of hour angles in degrees to use. If mask is None, then should have length
``self.ntiles``. Otherwise, should have a value per non-zero entry in the mask.
mask : array or None
Boolean mask of which tiles to perform the calculation for.
Returns
-------
array
Array of airmasses corresponding to each input hour angle.
"""
hour_angle = np.deg2rad(hour_angle)
if mask is None:
mask = slice(None)
cosZ = self.tile_coef_A[mask] + self.tile_coef_B[mask] * np.cos(hour_angle)
return desisurvey.utils.cos_zenith_to_airmass(cosZ)
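    # Note (editorial sketch, not part of the original module): airmass() uses
    # the spherical-astronomy relation
    #     cos Z = sin(dec) sin(lat) + cos(dec) cos(lat) cos(HA)
    # with the two precomputed terms stored in tile_coef_A and tile_coef_B, then
    # converts cos Z to airmass via desisurvey.utils.cos_zenith_to_airmass
    # (roughly sec Z near the zenith). For example, a tile transiting overhead
    # (dec == lat, HA == 0) has cos Z == 1 and airmass ~= 1.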
def airmass_at_mjd(self, mjd, mask=None):
"""Calculate tile airmass at given MJD.
Parameters
----------
mjd : array
Array of MJD to use. If mask is None, then should have length
``self.ntiles``. Otherwise, should have a value per non-zero entry
in the mask.
mask : array or None
Boolean mask of which tiles to perform the calculation for.
Returns
-------
array
Array of airmasses corresponding to each input hour angle.
"""
mjd = np.atleast_1d(mjd)
if len(mjd) == 0:
return np.zeros(0, dtype='f8')
tt = Time(mjd, format='mjd', location=desisurvey.utils.get_location())
lst = tt.sidereal_time('apparent').to(u.deg).value
ha = lst - self.tileRA[mask]
return self.airmass(ha, mask=mask)
def airmass_second_derivative(self, HA, mask=None):
"""Calculate second derivative of airmass with HA.
Useful for determining how close to design airmass we have to get
for different tiles. When this is large, we really need to observe
things right at their design angles. When it's small, we have more
flexibility.
"""
x = self.airmass(HA, mask=mask)
if mask is not None:
b = self.tile_coef_B[mask]
else:
b = self.tile_coef_B
d2rad = b*x**2 * (2*b*x*np.sin(np.radians(HA))**2 +
np.cos(np.radians(HA)))
return d2rad * (np.pi/180)**2
def index(self, tileID, return_mask=False):
"""Map tile ID to array index.
Parameters
----------
tileID : int or array
Tile ID value(s) to convert.
        return_mask : bool
            If True, an additional mask array is returned, indicating which
            IDs were present in the tile array. Otherwise, an exception is
            raised if any tiles were not found.
Returns
-------
int or array
Index into internal per-tile arrays corresponding to each input tile ID.
"""
scalar = np.isscalar(tileID)
tileID = np.atleast_1d(tileID)
if np.any(tileID < 0):
            raise ValueError('tileIDs must be positive!')
idx = np.searchsorted(self.tileID, tileID)
idx = np.clip(idx, 0, len(self.tileID)-1)
bad = self.tileID[idx] != tileID
if not return_mask and np.any(bad):
raise ValueError('Invalid tile ID(s): {}.'.format(tileID[bad]))
mask = ~bad
idx = idx[0] if scalar else idx
mask = mask[0] if scalar else mask
res = idx
if return_mask:
res = (res, mask)
return res
def get_conditions(self):
res = []
config = desisurvey.config.Configuration()
for program in self.tileprogram:
tprogram = getattr(config.programs, program, None)
if tprogram is None:
res.append('NONE')
else:
res.append(tprogram.conditions())
return np.array(res)
def allowed_in_conditions(self, cond):
if self.nogray and (cond == 'GRAY'):
cond = 'DARK'
res = (self.tileobsconditions == cond)
if self.bright_allowed_in_dark and (cond == 'DARK'):
res = res | (self.tileobsconditions == 'BRIGHT')
return res
@property
def overlapping(self):
"""Dictionary of tile overlap matrices.
overlapping[i] is the list of tile row numbers that overlap the
tile with row number i.
Overlapping tiles are only computed within a program; a tile cannot
overlap a tile of a different program. If fiber_assignment_delay is
        negative, tiles do not overlap one another within a program.
"""
if self._overlapping is None:
self._calculate_overlaps()
return self._overlapping
@property
def neighbors(self):
"""Dictionary of tile neighbor matrices.
neighbors[i] is the list of tile row numbers that neighbor the
tile with row number i within a pass.
Neighboring tiles are only computed within a program and pass.
"""
if self._neighbors is None:
self._calculate_neighbors()
return self._neighbors
@property
def fiberassign_delay(self):
"""Delay between covering a tile and when it can be fiber assigned.
Units are determined by the value of the fiber_assignment_cadence
configuration parameter.
"""
if self._fiberassign_delay is None:
self._calculate_overlaps()
return self._fiberassign_delay
def _calculate_overlaps(self):
"""Initialize attributes _overlapping.
Uses the config parameters ``fiber_assignment_delay`` and
``tile_diameter`` to determine overlap dependencies.
This is relatively slow, so only used the first time ``overlapping``
properties are accessed.
"""
self._overlapping = [[] for _ in range(self.ntiles)]
self._fiberassign_delay = np.full(self.ntiles, -1, int)
config = desisurvey.config.Configuration()
tile_diameter = 2 * config.tile_radius()
fiber_assignment_delay = config.fiber_assignment_delay
for program in self.programs:
delay = getattr(fiber_assignment_delay, program, None)
if delay is not None:
delay = delay()
else:
delay = -1
m = self.program_mask[program] & (self.in_desi != 0)
rownum = np.flatnonzero(m)
self._fiberassign_delay[m] = delay
# self._overlapping: list of lists, giving tiles overlapping each
# tile
if delay < 0:
# this program doesn't have overlapping tile requirements
continue
from astropy.coordinates import SkyCoord, search_around_sky
c = SkyCoord(self.tileRA[m]*u.deg, self.tileDEC[m]*u.deg)
idx1, idx2, sep2d, dist3d = search_around_sky(c, c, tile_diameter)
for ind1, ind2 in zip(idx1, idx2):
if ind1 == ind2:
# ignore self matches
continue
self._overlapping[rownum[ind1]].append(rownum[ind2])
def _calculate_neighbors(self):
"""Initialize attribute _neighbors. A neigbor is defined as a tile
in the same pass within 3 * config.tile_radius if config.tiles_lowpass
is True. Otherwise, it's all tiles within the program within
3 * config.tile_radius.
This is relatively slow, so only used the first time ``overlapping``
        properties are accessed.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple
from . import violation
from .violation import Violation
from sqlint.config import Config
from sqlint.syntax_tree import SyntaxTree, Node
from sqlint.parser import Token
from sqlint.parser.keywords import format as format_keyword
class Checker(metaclass=ABCMeta):
@staticmethod
@abstractmethod
def check(tree: SyntaxTree, config: Config) -> List[Violation]:
pass
class IndentStepsChecker(Checker):
@staticmethod
def check(tree: SyntaxTree, config: Config) -> List[Violation]:
# TODO: Enable users to ignore violation cases by config.
# Checks whether indent steps are N times.
indent_steps = config.indent_steps
return IndentStepsChecker._check(tree, indent_steps)
@staticmethod
def _check(tree: SyntaxTree, indent_steps: int) -> List[Violation]:
violation_list: List[Violation] = list()
for leaf in tree.leaves:
if leaf.indent % indent_steps != 0:
v = violation.IndentStepsViolation(
tree=leaf,
index=0,
expected=indent_steps,
actual=leaf.indent)
violation_list.append(v)
violation_list.extend(
IndentStepsChecker._check(leaf, indent_steps))
return violation_list
class KeywordStyleChecker(Checker):
"""Checks reserved keywords style.
Whether reserved keywords match one of following formats.
- lower: e.g) select
- upper-all: e.g) SELECT
- upper-head: e.g) Select
"""
@staticmethod
def check(tree: SyntaxTree, config: Config) -> List[Violation]:
# TODO: Enable users to ignore violation cases by config.
keyword_style = config.keyword_style
return KeywordStyleChecker._check(tree, keyword_style)
@staticmethod
def _check(tree: SyntaxTree, keyword_style: str) -> List[Violation]:
violation_list: List[Violation] = list()
for leaf in tree.leaves:
for idx, token in enumerate(leaf.tokens):
if token.kind not in [Token.KEYWORD, Token.FUNCTION]:
continue
word: str = token.word
expected: str = format_keyword(word, keyword_style)
if word != expected:
params = {'style': keyword_style, 'actual': word, 'expected': expected}
v = violation.KeywordStyleViolation(
tree=leaf,
index=idx,
**params)
violation_list.append(v)
violation_list.extend(
KeywordStyleChecker._check(leaf, keyword_style))
return violation_list
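# Illustrative sketch (not part of the original module): format_keyword maps a
# reserved word to the configured style, which is what KeywordStyleChecker
# compares against. The style names follow the class docstring above and are
# assumed to be the values accepted by the config.
def _example_keyword_styles():
    word = "select"
    return (format_keyword(word, "lower"),       # expected: "select"
            format_keyword(word, "upper-all"),   # expected: "SELECT"
            format_keyword(word, "upper-head"))  # expected: "Select"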
class CommaChecker(Checker):
""" Checks violations about comma.
    1. Whether a comma is at the head or the end of a line. (default: head)
"""
@staticmethod
def check(tree: SyntaxTree, config: Config) -> List[Violation]:
# TODO: Enable users to ignore violation cases by config.
comma_position = config.comma_position
result: List[Violation] = []
        # 1. Whether a comma is at the head or the end of a line. (default: head)
result.extend(CommaChecker._check_position(tree, comma_position))
return result
@staticmethod
def _check_position(tree: SyntaxTree, comma_position: str) -> List[Violation]:
violation_list: List[Violation] = list()
lb = Token('(', Token.BRACKET_LEFT)
rb = Token(')', Token.BRACKET_RIGHT)
for leaf in tree.leaves:
# removes whitespaces and comments at head and end of line.
ltripped_node: Node = leaf.node.ltrip_kind(Token.WHITESPACE, Token.COMMENT)
lindex = len(leaf.node) - len(ltripped_node)
tokens = ltripped_node.rtrip_kind(Token.WHITESPACE, Token.COMMENT).tokens
comma_indexes = [i for i, x in enumerate(tokens) if x == ',']
if comma_position == 'head':
comma_indexes = [i for i in comma_indexes if i != 0]
elif comma_position == 'end':
comma_indexes = [i for i in comma_indexes if i != len(tokens)-1]
for idx in comma_indexes:
                # If a comma is inside brackets, it is fine not to break the line at that comma.
                # This is determined by counting left and right brackets on each side of the comma.
is_open_bracket = 0 < (tokens[0:idx].count(lb) - tokens[0:idx].count(rb))
is_close_bracket = 0 < (tokens[idx+1:].count(rb) - tokens[idx+1:].count(lb))
if not is_open_bracket or not is_close_bracket:
violation_list.append(
violation.CommaPositionViolation(
tree=leaf,
index=lindex+idx,
comma_position=comma_position))
violation_list.extend(
CommaChecker._check_position(leaf, comma_position))
return violation_list
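# Illustrative sketch (not part of the original module): with the default
# comma_position of 'head', a query written as
#
#     select
#         x
#         , y
#     from t
#
# passes CommaChecker, while writing "x," at the end of the second line would
# be reported as a CommaPositionViolation. Commas that stay inside a bracket
# pair, such as in "func(x, y)", are never flagged because of the bracket
# counting above.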
class WhitespaceChecker(Checker):
""" Checks violations about whitespace.
1. Whether multiple whitespaces exist.
2. Whether a Whitespace is after a comma and not before it.
3. Whether a Whitespace is after a bracket and not before it.
4. Whether a Whitespace is after a operator and not before it.
"""
@staticmethod
def check(tree: SyntaxTree, config: Config) -> List[Violation]:
# TODO: Enable users to ignore violation cases by config.
result: List[Violation] = []
        # 1. Whether multiple whitespaces exist.
result.extend(WhitespaceChecker._check_multiple(tree))
# 2. Whether a Whitespace is after a comma and not before it.
result.extend(WhitespaceChecker._check_comma(tree))
# 3. Whether a Whitespace is after and before bracket.
result.extend(WhitespaceChecker._check_bracket(tree))
# 4. Whether a Whitespace is after and before operator.
result.extend(WhitespaceChecker._check_operator(tree))
return result
@staticmethod
def _check_multiple(tree: SyntaxTree) -> List[Violation]:
violation_list: List[Violation] = list()
for leaf in tree.leaves:
# ignores token at head of a line
tokens = leaf.tokens[1:]
for idx, tk in enumerate(tokens):
length = len(tk)
# ignores except whitespaces
if tk.kind != tk.WHITESPACE:
continue
# 2 spaces before comment is valid
if length == 2 and (idx+1 < len(tokens) and tokens[idx+1].kind == Token.COMMENT):
continue
if length > 1:
v = violation.MultiSpacesViolation(tree=leaf, index=idx)
violation_list.append(v)
violation_list.extend(WhitespaceChecker._check_multiple(leaf))
return violation_list
@staticmethod
def _check_comma(tree: SyntaxTree) -> List[Violation]:
violation_list: List[Violation] = list()
for leaf in tree.leaves:
            # A comma at the end of a line does not need to be checked
for idx, token in enumerate(leaf.tokens[:-1]):
if token.kind != Token.COMMA:
continue
                # Checks that no whitespace exists before a comma.
                # However, a whitespace before a comma at the head of a line is allowed.
if idx >= 2 and leaf.tokens[idx-1].kind == Token.WHITESPACE:
params = {'token': Token.COMMA,
'position': 'before'}
violation_list.append(
violation.WhitespaceViolation(
tree=leaf,
index=idx,
**params))
# checks whether a whitespace exists after comma.
if leaf.tokens[idx+1].kind != Token.WHITESPACE:
params = {'token': Token.COMMA,
'position': 'after',
'target': f'{token.word}{leaf.tokens[idx+1].word}'}
violation_list.append(
violation.WhitespaceViolation(
tree=leaf,
index=idx,
**params))
violation_list.extend(
WhitespaceChecker._check_comma(leaf))
return violation_list
@staticmethod
def _check_bracket(tree: SyntaxTree) -> List[Violation]:
violation_list: List[Violation] = list()
for leaf in tree.leaves:
            # The last token of a line does not need to be checked
for idx, token in enumerate(leaf.tokens[:-1]):
                # Checks that no whitespace exists after a left bracket, i.e. "( ".
if token.kind == Token.BRACKET_LEFT \
and leaf.tokens[idx+1].kind == Token.WHITESPACE:
params = {'token': Token.BRACKET_LEFT,
'position': 'after',
'target': f'{token.word}{leaf.tokens[idx+1].word}'}
violation_list.append(
violation.WhitespaceViolation(tree=leaf, index=idx, **params))
                # Checks that no whitespace exists before a right bracket, i.e. " )".
if token.kind == Token.BRACKET_RIGHT \
and (idx >= 2 and leaf.tokens[idx-1].kind == Token.WHITESPACE):
params = {
'token': Token.BRACKET_RIGHT,
'position': 'before',
'target': f'{leaf.tokens[idx-1].word}{token.word}'}
violation_list.append(
violation.WhitespaceViolation(
tree=leaf,
index=idx,
**params))
violation_list.extend(
WhitespaceChecker._check_bracket(leaf))
return violation_list
@staticmethod
def _check_operator(tree: SyntaxTree) -> List[Violation]:
violation_list: List[Violation] = list()
for leaf in tree.leaves:
            # The last token of a line does not need to be checked
for idx, token in enumerate(leaf.tokens[:-1]):
if token.kind != Token.OPERATOR:
continue
# Checks whether a whitespace exists before operator.
if idx >= 2 and leaf.tokens[idx-1].kind != Token.WHITESPACE:
params = {
'token': Token.OPERATOR,
'position': 'before',
'target': f'{leaf.tokens[idx-1].word}{token.word}'}
violation_list.append(
violation.WhitespaceViolation(tree=leaf, index=idx, **params))
# Checks whether a whitespace exists after operator.
if leaf.tokens[idx + 1].kind != Token.WHITESPACE:
params = {
'token': Token.OPERATOR,
'position': 'after',
'target': f'{token.word}{leaf.tokens[idx + 1].word}'}
violation_list.append(
violation.WhitespaceViolation(tree=leaf, index=idx, **params))
violation_list.extend(
WhitespaceChecker._check_operator(leaf))
return violation_list
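# Illustrative note (added for clarity, behaviour derived from the checks above): for a
# line like "WHERE a = b , c" the checker reports the whitespace before the comma; for
# "a =b" it reports the missing whitespace after the operator (operators must be
# surrounded by spaces); and "( a" or "b )" is reported as whitespace just inside brackets.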
class JoinChecker(Checker):
""" Checks violations about join context.
    1. Whether the join context and the table name are on the same line.
2. Whether join contexts are described fully, for example [inner join], [left outer join], [right outer join]
"""
@staticmethod
def check(tree: SyntaxTree, config: Config) -> List[Violation]:
# TODO: Enable users to ignore violation cases by config.
result: List[Violation] = []
        # 1. Whether the join context and the table name are on the same line.
result.extend(JoinChecker._check_table_existance(tree))
# 2. Whether join contexts are described fully, for example [inner join], [left outer join], [right outer join]
expected_kvs = {
'LEFT': ['LEFT', 'OUTER', 'JOIN'],
'RIGHT': ['RIGHT', 'OUTER', 'JOIN'],
'FULL': ['FULL', 'OUTER', 'JOIN'],
'OUTER': ['[LEFT|RIGHT|FULL]', 'OUTER', 'JOIN'],
'INNER': ['INNER', 'JOIN'],
'CROSS': ['CROSS', 'JOIN'],
}
expected_list = {}
for k, vs in expected_kvs.items():
_key = JoinChecker._format_str(k)
_value = ' '.join([JoinChecker._format_str(v) for v in vs])
expected_list[_key] = _value
result.extend(JoinChecker._check_context(tree, expected_list))
return result
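    # Illustrative note (added for clarity, not part of the original code): with the
    # expected_kvs mapping built above, 'LEFT' is expected to expand to 'LEFT OUTER JOIN',
    # 'INNER' to 'INNER JOIN', and a bare 'OUTER' to '[LEFT|RIGHT|FULL] OUTER JOIN', so a
    # query written as just 'LEFT JOIN t' is presumably what _check_context flags as an
    # incompletely described join.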
@staticmethod
def _format_str(value: str) -> str:
return value.upper()
@staticmethod
def _check_table_existance(tree: SyntaxTree) -> List[Violation]:
"""Checks the token next to 'Join' is identifier(maybe table_name) or SubQuery """
violation_list: List[Violation] = list()
for leaf in tree.leaves:
for idx, token in enumerate(leaf.tokens):
                # Ignores tokens other than JOIN
if token.word.upper() != 'JOIN':
continue
                # Skips the case where the token next to 'JOIN' is an identifier, which may be a table name.
if idx <= len(leaf.tokens)-2 and leaf.tokens[idx+2].kind == Token.IDENTIFIER:
continue
# TODO: Checks below
# TODO: SubQueries will become violation in the future.
"""
Ignores the token next to 'Join' is 'Select' (maybe SubQuery)
Examples:
1) ------
From
x
Join Select id From y
------
2) ------
From
x
Join (Select id From y)
------
"""
v = violation.JoinTableNotExistViolation(tree=leaf, index=idx)
violation_list.append(v)
violation_list.extend(JoinChecker._check_table_existance(leaf))
return violation_list
@staticmethod
def _check_context(tree: SyntaxTree, expected_list: Dict[str, str]) -> List[Violation]:
"""Checks whether join are described fully, for example [inner join], [left outer join], [right outer join] """
violation_list: List[Violation] = list()
# TODO: too deeply nest and complex code
for leaf in tree.leaves:
join_indexes = [i for i, x in enumerate(leaf.tokens) if x.word.upper() == 'JOIN']
for idx in join_indexes:
token = leaf.tokens[idx]
                # Concatenates keywords related to the join
join_contexts = [token.word]
                # only 'JOIN' is expected
import json
import requests
from http import client as http_client
import pickle
import os
from bs4 import BeautifulSoup
from factorial.exceptions import AuthenticationTokenNotFound, UserNotLoggedIn, ApiError
import hashlib
import logging
import logging.config
import random
from datetime import date
from constants import BASE_PROJECT, LOGGER
class FactorialClient:
# Folder to save the session's cookie
SESSIONS_FOLDER = os.path.join(BASE_PROJECT, "sessions")
# Default factorial settings file
DEFAULT_FACTORIAL_SETTINGS = os.path.join(BASE_PROJECT, 'factorial_settings.json')
# Endpoints
BASE_NAME = "https://api.factorialhr.com/"
# Url to be able to login (post: username, password) and logout (delete) on the api
SESSION_URL = '{}sessions'.format(BASE_NAME)
# Url to show the form to get the authentication token (get)
LOGIN_PAGE_URL = '{}users/sign_in'.format(BASE_NAME)
# Url to get the user info (get)
USER_INFO_URL = '{}accesses'.format(BASE_NAME)
# Get employee (get)
EMPLOYEE_URL = '{}employees'.format(BASE_NAME)
# Get period (get)
PERIODS_URL = '{}attendance/periods'.format(BASE_NAME)
# Shift days (get, post, patch, delete)
SHIFT_URL = '{}attendance/shifts'.format(BASE_NAME)
# Calendar (get)
CALENDAR_URL = '{}attendance/calendar'.format(BASE_NAME)
def __init__(self, email, password, cookie_file=None):
"""Factorial client to automatically sign up the work
:param email: (required) string, email to login on Factorial
:param password: (required) string, password to login on Factorial
:param cookie_file: (optional) string, file to save the cookies
"""
self.email = email
self.password = password
self.current_user = {}
self.mates = []
self.session = requests.Session()
        # Save the cookies to the specified file, or to a per-email file so multiple accounts can be used
self.cookie_file = cookie_file or hashlib.sha512(email.encode('utf-8')).hexdigest()
cookie_path = os.path.join(self.SESSIONS_FOLDER, self.cookie_file)
if os.path.exists(cookie_path):
with open(cookie_path, "rb") as file:
# TODO: Watch out the expiration of the cookie
LOGGER.info('Getting the session from cookies files')
self.session.cookies.update(pickle.load(file))
def login(self):
"""Login on the factorial web
:return: boolean if is logged in
"""
try:
self.load_user_data()
            # Try to load the user info using the cookie; if that fails, log in again with the email and password
LOGGER.info('Already logged in, re-login is not needed')
return True
except UserNotLoggedIn:
payload = {
'utf8': '✓',
'authenticity_token': self.generate_new_token(),
'user[email]': self.email,
'user[password]': <PASSWORD>,
'user[remember_me]': "0",
'commit': 'Iniciar sesión'
}
response = self.session.post(url=self.SESSION_URL, data=payload)
loggedin = response.status_code == http_client.CREATED
if loggedin:
LOGGER.info('Login successfully')
# Load user data
self.load_user_data()
                # Save the cookies if logged in
if not os.path.exists(self.SESSIONS_FOLDER):
os.mkdir(self.SESSIONS_FOLDER)
with open(os.path.join(self.SESSIONS_FOLDER, self.cookie_file), "wb") as file:
pickle.dump(self.session.cookies, file)
LOGGER.info('Sessions saved')
return loggedin
@staticmethod
def generate_new_token():
"""Generate new token to be able to login"""
response = requests.get(url=FactorialClient.LOGIN_PAGE_URL)
soup = BeautifulSoup(response.text, 'html.parser')
auth_token = soup.find('input', attrs={'name': 'authenticity_token'})
token_value = auth_token.get('value')
if not token_value:
raise AuthenticationTokenNotFound()
return token_value
@staticmethod
def load_from_settings(json_settings=DEFAULT_FACTORIAL_SETTINGS):
"""Login from the settings if the session still valid from the saved cookies, otherwise ask for the password
:param json_settings: string config filename
:return: FactorialClient
"""
with open(json_settings, 'r') as file:
settings = json.load(file)
factorial_client = FactorialClient(email=settings.get('email', ''),
password=settings.get('password', ''))
if not factorial_client.login():
            # Neither the saved cookie nor the given credentials produced a valid session
raise ApiError('Cannot login with the given credentials')
return factorial_client
@staticmethod
def split_time(time):
"""Split time to hour and minutes
:param time: string time 7:30
:return: tuple(hours, minutes)
"""
return (int(t) for t in time.split(':'))
@staticmethod
def convert_to_minutes(hours, minutes):
"""Convert time to minutes
:param hours: int
:param minutes: int
:return: int
"""
return hours * 60 + minutes
@staticmethod
def convert_to_time(minutes):
"""Convert minutes to time
:param minutes: int
:return: tuple(hours, minutes)
"""
converted_hours = int(minutes / 60)
converted_minutes = int(minutes - converted_hours * 60)
return converted_hours, converted_minutes
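    # Illustrative example (added for clarity, not part of the original code):
    #   convert_to_minutes(7, 30) -> 450
    #   convert_to_time(450)      -> (7, 30)
    # i.e. the two helpers are inverses of each other as long as minutes < 60.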
@staticmethod
def get_total_minutes_period(start_hours, start_minutes, end_hours, end_minutes):
"""Get total minutes for a period
:param start_hours: int hours
:param start_minutes: int minutes
:param end_hours: int hours
:param end_minutes: int minutes
:return: total minutes
"""
start_minutes = FactorialClient.convert_to_minutes(start_hours, start_minutes)
end_minutes = FactorialClient.convert_to_minutes(end_hours, end_minutes)
return end_minutes - start_minutes
@staticmethod
def get_random_number(start, end):
"""Get random number between two numbers, both included
Eg:
start = -10
end = 10
1 * (10 - -10) + -10 = 10
0 * (10 - -10) + -10 = -10
:param start: int start
:param end: int end
:return: int random number between start and end
"""
return random.random() * (end - start) + start
@staticmethod
def random_time(hours, minutes, minutes_variation):
"""Variation between minutes
:param hours: int current hour
:param minutes: int current minutes
:param minutes_variation: int minutes to variate
:return: tuple (hours, minutes)
"""
        # A minutes variation of 10 yields a random value between -10 and 10
random_minutes_variation = FactorialClient.get_random_number(start=-minutes_variation, end=minutes_variation)
        # Convert hours and minutes to total minutes
total_minutes = FactorialClient.convert_to_minutes(hours, minutes)
# Remove or add the minutes variation
variated_minutes = total_minutes + random_minutes_variation
        # Convert back to hours and minutes
return FactorialClient.convert_to_time(variated_minutes)
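    # Illustrative example (assumed usage): random_time(9, 0, 10) returns a clock time
    # roughly between 08:50 and 09:10, because the variation is drawn uniformly from
    # [-10, 10] minutes and added to the 540 minutes that 09:00 represents.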
def check_status_code(self, status_code, status_code_error, message=None):
"""Check if the call of the endpoint is correct
:param status_code: HttpStatus
:param status_code_error: HttpStatus
:param message: string
"""
if status_code == http_client.UNAUTHORIZED:
raise UserNotLoggedIn()
elif status_code != status_code_error:
raise ApiError(message)
def generate_period(self, start, end, minutes_variation):
"""Generate a period with a random variation
:param start: string time
:param end: string time
:param minutes_variation: int minutes to variate
:return: tuple (start_hours, start_minutes, end_hours, end_minutes)
"""
start_hours, start_minutes = self.split_time(start)
end_hours, end_minutes = self.split_time(end)
total_minutes = self.get_total_minutes_period(start_hours, start_minutes, end_hours, end_minutes)
start_sign_hour, start_sign_minutes = FactorialClient.random_time(start_hours, start_minutes, minutes_variation)
end_sign_hour, end_sign_minutes = self.convert_to_time(self.convert_to_minutes(start_sign_hour, start_sign_minutes) + total_minutes)
return start_sign_hour, start_sign_minutes, end_sign_hour, end_sign_minutes
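    # Illustrative example (assumed usage): generate_period('9:00', '17:00', 10) keeps the
    # 8-hour duration but shifts the whole block by up to +/-10 minutes, so it may return
    # e.g. (9, 7, 17, 7) for a start at 09:07 and an end at 17:07.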
def add_breaks_to_period(self, start_sign_hour, start_sign_minutes, end_sign_hour, end_sign_minutes, breaks):
"""Add breaks for a period
:return list of periods, tuple(start_hour, start_minute, end_hour, end_minute)
"""
periods = []
start_hour = start_sign_hour
start_minute = start_sign_minutes
for _break in sorted(breaks, key=lambda current_break: self.convert_to_minutes(current_break['start_hour'], current_break['start_minute']), reverse=False):
break_start_hour = _break.get('start_hour')
break_start_minute = _break.get('start_minute')
break_end_hour = _break.get('end_hour')
break_end_minute = _break.get('end_minute')
periods.append({
'start_hour': start_hour,
'start_minute': start_minute,
'end_hour': break_start_hour,
'end_minute': break_start_minute
})
start_hour = break_end_hour
start_minute = break_end_minute
# End period
periods.append({
'start_hour': start_hour,
'start_minute': start_minute,
'end_hour': end_sign_hour,
'end_minute': end_sign_minutes
})
return periods
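    # Illustrative example (assumed usage): an 09:00-17:00 period with a single break
    # {'start_hour': 13, 'start_minute': 0, 'end_hour': 14, 'end_minute': 0} is split into
    # two periods, 09:00-13:00 and 14:00-17:00, which generate_worked_periods() below
    # produces for worked_day() to sign.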
def generate_worked_periods(self, start_work, end_work, work_minutes_variation, breaks):
"""Generate worked periods with breaks
:param start_work: string time
:param end_work: string time
:param work_minutes_variation: int minutes to variate
:param breaks: list of dictionaries
:return: list of periods
"""
start_sign_hour, start_sign_minutes, end_sign_hour, end_sign_minutes = self.generate_period(start_work, end_work, work_minutes_variation)
breaks_with_variation = []
for _break in breaks:
start_break_hour, start_break_minutes, end_break_hour, end_break_minutes = self.generate_period(**_break)
breaks_with_variation.append({
'start_hour': start_break_hour,
'start_minute': start_break_minutes,
'end_hour': end_break_hour,
'end_minute': end_break_minutes
})
return self.add_breaks_to_period(start_sign_hour, start_sign_minutes, end_sign_hour, end_sign_minutes, breaks_with_variation)
def worked_day(self, day=date.today(), json_settings=DEFAULT_FACTORIAL_SETTINGS):
"""Mark today as worked day
:param day: date to save the worked day, by default is today
:param json_settings: string config filename
"""
with open(json_settings, 'r') as file:
settings = json.load(file)
work_settings_block = settings.get('work', {})
start_work = work_settings_block.get('start', '')
end_work = work_settings_block.get('end', '')
work_minutes_variation = work_settings_block.get('minutes_variation', 0)
breaks = work_settings_block.get('breaks', [])
already_work = self.get_day(year=day.year, month=day.month, day=day.day)
if already_work:
if work_settings_block.get('resave'):
for worked_period in already_work:
self.delete_worked_period(worked_period.get('id'))
else:
LOGGER.info('Day already sign')
return
add_worked_period_kwargs = {
'year': day.year,
'month': day.month,
'day': day.day,
# Dynamic over loop fields
'start_hour': 0,
'start_minute': 0,
'end_hour': 0,
'end_minute': 0
}
worked_periods = self.generate_worked_periods(start_work, end_work, work_minutes_variation, breaks)
for worked_period in worked_periods:
start_hour = worked_period.get('start_hour')
start_minute = worked_period.get('start_minute')
end_hour = worked_period.get('end_hour')
end_minute = worked_period.get('end_minute')
add_worked_period_kwargs.update({
'start_hour': start_hour,
'start_minute': start_minute,
'end_hour': end_hour,
'end_minute': end_minute,
})
if self.add_worked_period(**add_worked_period_kwargs):
LOGGER.info('Saved worked period for the day {0:s} between {1:02d}:{2:02d} - {3:02d}:{4:02d}'.format(
day.isoformat(),
start_hour, start_minute,
end_hour, end_minute))
def logout(self):
"""Logout invalidating that session, invalidating the cookie _factorial_session
:return: bool
"""
response = self.session.delete(url=self.SESSION_URL)
logout_correcty = response.status_code == http_client.NO_CONTENT
LOGGER.info('Logout successfully {}'.format(logout_correcty))
self.session = requests.Session()
path_file = os.path.join(self.SESSIONS_FOLDER, self.cookie_file)
if os.path.exists(path_file):
os.remove(path_file)
logging.info('Logout: Removed cookies file')
self.mates.clear()
self.current_user = {}
return logout_correcty
def load_employees(self):
"""Load employees info
Example:
[
{
'access_id'
'birthday_on'
'hired_on'
'job_title'
'id',
'manager_id'
'supervised_by_current'
'terminated_on'
'is_terminating'
'timeoff_policy_id'
'timeoff_manager_id'
'timeoff_supervised_by_current'
'location_id'
'employee_group_id'
'payroll_hiring_id'
'is_eligible_for_payroll'
}
]
"""
LOGGER.info("Loading employees")
employee_response = self.session.get(self.EMPLOYEE_URL)
self.check_status_code(employee_response.status_code, http_client.OK)
employee_json = employee_response.json()
for employee in employee_json:
            # Update the mate entries whose id matches employee.access_id
for mate in self.mates:
if mate.get('id') == employee.get('access_id'):
mate.update(employee)
if self.current_user.get('id') == employee.get('access_id'):
self.current_user.update(employee)
def load_user_data(self):
"""Load info about your user
Example:
```
[
{
"id": <integer>,
"user_id": <integer>,
"company_id": <integer>,
"invited": true,
"invited_on": "YYYY-MM-DD",
"role": "basic",
"current": true/false,
"calendar_token": null,
"first_name": "sss",
"last_name": "sss",
"email": "sss@sss",
"unconfirmed_email": null,
"joined": true/false,
"locale": "xx",
"avatar": null,
"tos": true
},
...
]
```
"""
self.mates.clear()
self.current_user = {}
response = self.session.get(url=self.USER_INFO_URL)
self.check_status_code(response.status_code, http_client.OK)
json_response = response.json()
for user in json_response:
current_user = user
if | |
<reponame>pulumi/pulumi-databricks
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['MwsCustomerManagedKeysArgs', 'MwsCustomerManagedKeys']
@pulumi.input_type
class MwsCustomerManagedKeysArgs:
def __init__(__self__, *,
account_id: pulumi.Input[str],
aws_key_info: pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs'],
use_cases: pulumi.Input[Sequence[pulumi.Input[str]]],
creation_time: Optional[pulumi.Input[int]] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a MwsCustomerManagedKeys resource.
:param pulumi.Input[str] account_id: Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/)
:param pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs'] aws_key_info: This field is a block and is documented below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] use_cases: *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are:
:param pulumi.Input[int] creation_time: (Integer) Time in epoch milliseconds when the customer key was created.
:param pulumi.Input[str] customer_managed_key_id: (String) ID of the encryption key configuration object.
"""
pulumi.set(__self__, "account_id", account_id)
pulumi.set(__self__, "aws_key_info", aws_key_info)
pulumi.set(__self__, "use_cases", use_cases)
if creation_time is not None:
pulumi.set(__self__, "creation_time", creation_time)
if customer_managed_key_id is not None:
pulumi.set(__self__, "customer_managed_key_id", customer_managed_key_id)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Input[str]:
"""
Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/)
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: pulumi.Input[str]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter(name="awsKeyInfo")
def aws_key_info(self) -> pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs']:
"""
This field is a block and is documented below.
"""
return pulumi.get(self, "aws_key_info")
@aws_key_info.setter
def aws_key_info(self, value: pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs']):
pulumi.set(self, "aws_key_info", value)
@property
@pulumi.getter(name="useCases")
def use_cases(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
*(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are:
"""
return pulumi.get(self, "use_cases")
@use_cases.setter
def use_cases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "use_cases", value)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> Optional[pulumi.Input[int]]:
"""
(Integer) Time in epoch milliseconds when the customer key was created.
"""
return pulumi.get(self, "creation_time")
@creation_time.setter
def creation_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "creation_time", value)
@property
@pulumi.getter(name="customerManagedKeyId")
def customer_managed_key_id(self) -> Optional[pulumi.Input[str]]:
"""
(String) ID of the encryption key configuration object.
"""
return pulumi.get(self, "customer_managed_key_id")
@customer_managed_key_id.setter
def customer_managed_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_managed_key_id", value)
@pulumi.input_type
class _MwsCustomerManagedKeysState:
def __init__(__self__, *,
account_id: Optional[pulumi.Input[str]] = None,
aws_key_info: Optional[pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs']] = None,
creation_time: Optional[pulumi.Input[int]] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
use_cases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering MwsCustomerManagedKeys resources.
:param pulumi.Input[str] account_id: Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/)
:param pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs'] aws_key_info: This field is a block and is documented below.
:param pulumi.Input[int] creation_time: (Integer) Time in epoch milliseconds when the customer key was created.
:param pulumi.Input[str] customer_managed_key_id: (String) ID of the encryption key configuration object.
:param pulumi.Input[Sequence[pulumi.Input[str]]] use_cases: *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are:
"""
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if aws_key_info is not None:
pulumi.set(__self__, "aws_key_info", aws_key_info)
if creation_time is not None:
pulumi.set(__self__, "creation_time", creation_time)
if customer_managed_key_id is not None:
pulumi.set(__self__, "customer_managed_key_id", customer_managed_key_id)
if use_cases is not None:
pulumi.set(__self__, "use_cases", use_cases)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/)
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter(name="awsKeyInfo")
def aws_key_info(self) -> Optional[pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs']]:
"""
This field is a block and is documented below.
"""
return pulumi.get(self, "aws_key_info")
@aws_key_info.setter
def aws_key_info(self, value: Optional[pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs']]):
pulumi.set(self, "aws_key_info", value)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> Optional[pulumi.Input[int]]:
"""
(Integer) Time in epoch milliseconds when the customer key was created.
"""
return pulumi.get(self, "creation_time")
@creation_time.setter
def creation_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "creation_time", value)
@property
@pulumi.getter(name="customerManagedKeyId")
def customer_managed_key_id(self) -> Optional[pulumi.Input[str]]:
"""
(String) ID of the encryption key configuration object.
"""
return pulumi.get(self, "customer_managed_key_id")
@customer_managed_key_id.setter
def customer_managed_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_managed_key_id", value)
@property
@pulumi.getter(name="useCases")
def use_cases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
*(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are:
"""
return pulumi.get(self, "use_cases")
@use_cases.setter
def use_cases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "use_cases", value)
class MwsCustomerManagedKeys(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
aws_key_info: Optional[pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysAwsKeyInfoArgs']]] = None,
creation_time: Optional[pulumi.Input[int]] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
use_cases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
## Example Usage
> **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.
### Customer-managed key for managed services
You must configure this during workspace creation
```python
import pulumi
import pulumi_aws as aws
import pulumi_databricks as databricks
config = pulumi.Config()
databricks_account_id = config.require_object("databricksAccountId")
databricks_managed_services_cmk = aws.iam.get_policy_document(version="2012-10-17",
statements=[
aws.iam.GetPolicyDocumentStatementArgs(
sid="Enable IAM User Permissions",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=["*"],
)],
actions=["kms:*"],
resources=["*"],
),
aws.iam.GetPolicyDocumentStatementArgs(
sid="Allow Databricks to use KMS key for control plane managed services",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=["arn:aws:iam::414351767826:root"],
)],
actions=[
"kms:Encrypt",
"kms:Decrypt",
],
resources=["*"],
),
])
managed_services_customer_managed_key = aws.kms.Key("managedServicesCustomerManagedKey", policy=databricks_managed_services_cmk.json)
managed_services_customer_managed_key_alias = aws.kms.Alias("managedServicesCustomerManagedKeyAlias", target_key_id=managed_services_customer_managed_key.key_id)
managed_services = databricks.MwsCustomerManagedKeys("managedServices",
account_id=databricks_account_id,
aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(
key_arn=managed_services_customer_managed_key.arn,
key_alias=managed_services_customer_managed_key_alias.name,
),
use_cases=["MANAGED_SERVICES"])
```
### Customer-managed key for workspace storage
```python
import pulumi
import pulumi_aws as aws
import pulumi_databricks as databricks
config = pulumi.Config()
databricks_account_id = config.require_object("databricksAccountId")
databricks_cross_account_role = config.require_object("databricksCrossAccountRole")
databricks_storage_cmk = aws.iam.get_policy_document(version="2012-10-17",
statements=[
aws.iam.GetPolicyDocumentStatementArgs(
sid="Enable IAM User Permissions",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=["*"],
)],
actions=["kms:*"],
resources=["*"],
),
aws.iam.GetPolicyDocumentStatementArgs(
sid="Allow Databricks to use KMS key for DBFS",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=["arn:aws:iam::414351767826:root"],
)],
actions=[
"kms:Encrypt",
"kms:Decrypt",
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:DescribeKey",
],
resources=["*"],
),
aws.iam.GetPolicyDocumentStatementArgs(
sid="Allow Databricks to use KMS key for DBFS (Grants)",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=["arn:aws:iam::414351767826:root"],
)],
actions=[
"kms:CreateGrant",
"kms:ListGrants",
"kms:RevokeGrant",
],
resources=["*"],
conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(
test="Bool",
variable="kms:GrantIsForAWSResource",
values=["true"],
)],
),
aws.iam.GetPolicyDocumentStatementArgs(
sid="Allow Databricks to use KMS key for EBS",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=[databricks_cross_account_role],
)],
actions=[
"kms:Decrypt",
"kms:GenerateDataKey*",
"kms:CreateGrant",
"kms:DescribeKey",
],
resources=["*"],
conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(
test="ForAnyValue:StringLike",
variable="kms:ViaService",
values=["ec2.*.amazonaws.com"],
)],
),
])
storage_customer_managed_key = aws.kms.Key("storageCustomerManagedKey", policy=databricks_storage_cmk.json)
storage_customer_managed_key_alias = aws.kms.Alias("storageCustomerManagedKeyAlias", target_key_id=storage_customer_managed_key.key_id)
storage = databricks.MwsCustomerManagedKeys("storage",
account_id=databricks_account_id,
aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(
key_arn=storage_customer_managed_key.arn,
key_alias=storage_customer_managed_key_alias.name,
),
use_cases=["STORAGE"])
```
## Related Resources
The following resources are used in the same context:
* Provisioning Databricks on AWS guide.
* MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.
* MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).
* MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS.
* MwsStorageConfigurations to configure root bucket new workspaces within AWS.
* MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).
## Import
-> **Note** Importing this resource is not currently supported.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/)
:param pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysAwsKeyInfoArgs']] aws_key_info: This field is a block and is documented below.
:param pulumi.Input[int] creation_time: (Integer) Time in epoch milliseconds when the customer key was created.
:param pulumi.Input[str] customer_managed_key_id: (String) ID of the encryption key configuration object.
:param pulumi.Input[Sequence[pulumi.Input[str]]] use_cases: *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are:
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: MwsCustomerManagedKeysArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
> **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.
### Customer-managed key for managed services
You must configure this during workspace creation
```python
import pulumi
import pulumi_aws as aws
import pulumi_databricks as databricks
config = pulumi.Config()
databricks_account_id = config.require_object("databricksAccountId")
databricks_managed_services_cmk = aws.iam.get_policy_document(version="2012-10-17",
statements=[
aws.iam.GetPolicyDocumentStatementArgs(
sid="Enable IAM User Permissions",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=["*"],
)],
actions=["kms:*"],
resources=["*"],
),
aws.iam.GetPolicyDocumentStatementArgs(
sid="Allow Databricks to use KMS key for control plane managed services",
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=["arn:aws:iam::414351767826:root"],
)],
actions=[
"kms:Encrypt",
"kms:Decrypt",
],
resources=["*"],
),
])
<filename>tests/metrics/test_metrics.py
import unittest
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, fbeta_score, precision_score, recall_score
import torch
from src.metrics import AccuracyMeter, F1Meter, FbetaMeter, PrecisionMeter, RecallMeter, \
MeanIntersectionOverUnionMeter, far, frr, EERMeter
from src.metrics.classification import MultiLabelRecallMeter, MultiLabelPrecisionMeter, MultiLabelF1Meter
__all__ = ["AccuracyTest", "F1ScoreTest", "FBetaScoreTest", "PrecisionTest", "RecallTest",
"MeanIntersectionOverUnionTests", "EERMeterTest"]
class AccuracyTest(unittest.TestCase):
def setUp(self):
self._Y_PRED_MULTICLASS = np.array(
[[1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1], [0, 0, 1],
[0, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [1, 0, 0],
[0, 0, 1], [0, 0, 1], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 1, 0]],
dtype=np.float32
)
self._Y_TRUE_MULTICLASS = np.array(
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2],
dtype=np.float32
)
def test_one_iteration(self):
y_pred_sklearn = np.argmax(self._Y_PRED_MULTICLASS, axis=1)
scikit_learn_score = accuracy_score(self._Y_TRUE_MULTICLASS, y_pred_sklearn)
accuracy_test = AccuracyMeter()
accuracy_test.update(self._Y_TRUE_MULTICLASS, self._Y_PRED_MULTICLASS)
self.assertEqual(scikit_learn_score, accuracy_test.on_epoch_end())
def test_multiple_iterations(self):
half = len(self._Y_TRUE_MULTICLASS) // 2
y_pred_sklearn = np.argmax(self._Y_PRED_MULTICLASS, axis=1)
scikit_learn_score = accuracy_score(self._Y_TRUE_MULTICLASS, y_pred_sklearn)
accuracy_test = AccuracyMeter()
accuracy_test.update(self._Y_TRUE_MULTICLASS[:half],
self._Y_PRED_MULTICLASS[:half])
accuracy_test.update(self._Y_TRUE_MULTICLASS[half:],
self._Y_PRED_MULTICLASS[half:])
self.assertEqual(scikit_learn_score, accuracy_test.on_epoch_end())
class ClassificationTest(unittest.TestCase):
def setUp(self) -> None:
# Multi-class classification
self.scores = np.array(
[[0.0783, 0.0866, 0.0957, 0.0783, 0.0709, 0.0957, 0.1169, 0.1292, 0.1427, 0.1057],
[0.0717, 0.0875, 0.0968, 0.1069, 0.1595, 0.1182, 0.0968, 0.0875, 0.0875, 0.0875],
[0.1025, 0.1025, 0.0840, 0.1133, 0.1252, 0.1384, 0.0760, 0.0687, 0.1133, 0.0760]]
)
self.targets = np.array([7, 4, 5])
self.true_pos = np.array([0., 0., 0., 0., 1., 1., 0., 0., 0., 0.])
self.false_pos = np.array([0., 0., 0., 0., 0., 0., 0., 0., 1., 0.])
self.false_neg = np.array([0., 0., 0., 0., 0., 0., 0., 1., 0., 0.])
# Binary classification
self.scores_binary = np.array([0, 0, 0, 1, 1, 1, 1, 1])
self.targets_binary = np.array([0, 1, 1, 0, 1, 1, 1, 1])
self.true_pos_binary = 4
self.false_pos_binary = 1
self.false_neg_binary = 2
# Target class classification
self.target_class = 4
self.scores_target_class = self.scores.argmax(1) == self.target_class
self.targets_target_class = self.targets == self.target_class
self.true_pos_target_class = 1
self.false_pos_target_class = 0
self.false_neg_target_class = 0
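    # Note (added for clarity, derived from the fixtures above): argmax over `scores`
    # gives predictions [8, 4, 5] against targets [7, 4, 5], hence one true positive each
    # for classes 4 and 5, one false positive for class 8 and one false negative for
    # class 7 -- exactly the true_pos / false_pos / false_neg arrays defined in setUp.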
class F1ScoreTest(ClassificationTest):
def test_calculate(self):
metric = F1Meter(num_classes=10, average='macro')
tested_metric_result = metric.calculate(self.targets, self.scores)
gt_sklearn_result = f1_score(y_true=self.targets, y_pred=self.scores.argmax(1),
average='macro', labels=np.arange(10))
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_update(self):
metric = F1Meter(num_classes=10, average='macro')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg)
def test_update_binary(self):
metric = F1Meter(average='binary')
for i in range(len(self.targets_binary)):
y_pred = self.scores_binary[None, i]
y_true = self.targets_binary[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos_binary)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos_binary)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg_binary)
def test_update_target_class(self):
metric = F1Meter(num_classes=10, target_class=self.target_class, average='binary')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos_target_class)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos_target_class)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg_target_class)
def test_on_epoch_end_macro(self):
metric = F1Meter(num_classes=10, average='macro')
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = f1_score(self.targets, self.scores.argmax(1),
average='macro', labels=np.arange(10))
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_macro(self):
metric = F1Meter(num_classes=10, average='macro')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = f1_score(y_true=self.targets, y_pred=self.scores.argmax(1),
average='macro', labels=np.arange(10))
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_micro(self):
metric = F1Meter(num_classes=10, average='micro')
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = f1_score(self.targets, self.scores.argmax(1), average='micro')
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_micro(self):
metric = F1Meter(num_classes=10, average='micro')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = f1_score(y_true=self.targets, y_pred=self.scores.argmax(1), average='micro')
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_weighted(self):
metric = F1Meter(num_classes=10, average='weighted')
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = f1_score(self.targets, self.scores.argmax(1), average='weighted')
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_weighted(self):
metric = F1Meter(num_classes=10, average='weighted')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = f1_score(y_true=self.targets, y_pred=self.scores.argmax(1), average='weighted')
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_binary(self):
metric = F1Meter(average='binary')
metric.true_pos = self.true_pos_binary
metric.false_neg = self.false_neg_binary
metric.false_pos = self.false_pos_binary
gt_sklearn_result = f1_score(self.targets_binary, self.scores_binary, average='binary')
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_binary(self):
metric = F1Meter(average='binary')
for i in range(len(self.targets_binary)):
y_pred = self.scores_binary[None, i]
y_true = self.targets_binary[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = f1_score(y_true=self.targets_binary, y_pred=self.scores_binary, average='binary')
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_target_class(self):
metric = F1Meter(num_classes=10, target_class=self.target_class, average='binary')
metric.true_pos = self.true_pos_target_class
metric.false_neg = self.false_neg_target_class
metric.false_pos = self.false_pos_target_class
gt_sklearn_result = f1_score(y_true=self.targets_target_class, y_pred=self.scores_target_class,
average='binary')
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_target_class(self):
metric = F1Meter(num_classes=10, target_class=self.target_class, average='binary')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = f1_score(y_true=self.targets_target_class, y_pred=self.scores_target_class,
average='binary')
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
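# Reference formulas (standard definitions, added for clarity): with tp/fp/fn accumulated
# per class, precision = tp / (tp + fp), recall = tp / (tp + fn) and
# F_beta = (1 + beta**2) * precision * recall / (beta**2 * precision + recall),
# which reduces to F1 when beta == 1; the tests below check these against scikit-learn.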
class FBetaScoreTest(ClassificationTest):
def setUp(self) -> None:
super().setUp()
self.beta = 2.
def test_calculate(self):
metric = FbetaMeter(num_classes=10, average='macro', beta=self.beta)
tested_metric_result = metric.calculate(self.targets, self.scores)
gt_sklearn_result = fbeta_score(y_true=self.targets, y_pred=self.scores.argmax(1),
beta=self.beta, average='macro', labels=np.arange(10))
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_update(self):
metric = FbetaMeter(num_classes=10, average='macro', beta=self.beta)
for i in range(3):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg)
def test_update_binary(self):
metric = FbetaMeter(average='binary', beta=self.beta)
for i in range(len(self.targets_binary)):
y_pred = self.scores_binary[None, i]
y_true = self.targets_binary[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos_binary)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos_binary)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg_binary)
def test_update_target_class(self):
metric = FbetaMeter(num_classes=10, target_class=self.target_class, average='binary', beta=self.beta)
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos_target_class)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos_target_class)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg_target_class)
def test_on_epoch_end_macro(self):
metric = FbetaMeter(num_classes=10, average='macro', beta=self.beta)
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = fbeta_score(self.targets, self.scores.argmax(1),
beta=self.beta, average='macro', labels=np.arange(10))
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_macro(self):
metric = FbetaMeter(num_classes=10, average='macro', beta=self.beta)
for i in range(3):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = fbeta_score(y_true=self.targets, y_pred=self.scores.argmax(1),
beta=self.beta, average='macro', labels=np.arange(10))
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_micro(self):
metric = FbetaMeter(num_classes=10, average='micro', beta=self.beta)
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = fbeta_score(self.targets, self.scores.argmax(1), average='micro',
beta=self.beta)
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_micro(self):
metric = FbetaMeter(num_classes=10, average='micro', beta=self.beta)
for i in range(3):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = fbeta_score(y_true=self.targets, y_pred=self.scores.argmax(1), average='micro',
beta=self.beta)
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_weighted(self):
metric = FbetaMeter(num_classes=10, average='weighted', beta=self.beta)
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = fbeta_score(self.targets, self.scores.argmax(1), average='weighted',
beta=self.beta)
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_weighted(self):
metric = FbetaMeter(num_classes=10, average='weighted', beta=self.beta)
for i in range(3):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = fbeta_score(y_true=self.targets, y_pred=self.scores.argmax(1), average='weighted',
beta=self.beta)
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_binary(self):
metric = FbetaMeter(average='binary', beta=self.beta)
metric.true_pos = self.true_pos_binary
metric.false_neg = self.false_neg_binary
metric.false_pos = self.false_pos_binary
gt_sklearn_result = fbeta_score(self.targets_binary, self.scores_binary,
average='binary', beta=self.beta)
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_binary(self):
metric = FbetaMeter(average='binary', beta=self.beta)
for i in range(len(self.targets_binary)):
y_pred = self.scores_binary[None, i]
y_true = self.targets_binary[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = fbeta_score(y_true=self.targets_binary, y_pred=self.scores_binary,
average='binary', beta=self.beta)
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_target_class(self):
metric = FbetaMeter(num_classes=10, target_class=self.target_class, average='binary', beta=self.beta)
metric.true_pos = self.true_pos_target_class
metric.false_neg = self.false_neg_target_class
metric.false_pos = self.false_pos_target_class
gt_sklearn_result = fbeta_score(y_true=self.targets_target_class, y_pred=self.scores_target_class,
average='binary', beta=self.beta)
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_target_class(self):
metric = FbetaMeter(num_classes=10, target_class=self.target_class, average='binary', beta=self.beta)
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = fbeta_score(y_true=self.targets_target_class, y_pred=self.scores_target_class,
average='binary', beta=self.beta)
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
class PrecisionTest(ClassificationTest):
def test_calculate(self):
metric = PrecisionMeter(num_classes=10, average='macro')
tested_metric_result = metric.calculate(self.targets, self.scores)
gt_sklearn_result = precision_score(y_true=self.targets, y_pred=self.scores.argmax(1),
average='macro', labels=np.arange(10))
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_update(self):
metric = PrecisionMeter(num_classes=10, average='macro')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg)
def test_update_binary(self):
metric = PrecisionMeter(average='binary')
for i in range(len(self.targets_binary)):
y_pred = self.scores_binary[None, i]
y_true = self.targets_binary[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos_binary)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos_binary)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg_binary)
def test_update_target_class(self):
metric = PrecisionMeter(num_classes=10, target_class=self.target_class, average='binary')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
np.testing.assert_almost_equal(metric.true_pos, self.true_pos_target_class)
np.testing.assert_almost_equal(metric.false_pos, self.false_pos_target_class)
np.testing.assert_almost_equal(metric.false_neg, self.false_neg_target_class)
def test_on_epoch_end_macro(self):
metric = PrecisionMeter(num_classes=10, average='macro')
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = precision_score(self.targets, self.scores.argmax(1),
average='macro', labels=np.arange(10))
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_macro(self):
metric = PrecisionMeter(num_classes=10, average='macro')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = precision_score(y_true=self.targets, y_pred=self.scores.argmax(1),
average='macro', labels=np.arange(10))
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_micro(self):
metric = PrecisionMeter(num_classes=10, average='micro')
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = precision_score(self.targets, self.scores.argmax(1), average='micro')
self.assertAlmostEqual(metric.on_epoch_end(), gt_sklearn_result)
def test_update_on_epoch_end_micro(self):
metric = PrecisionMeter(num_classes=10, average='micro')
for i in range(len(self.targets)):
y_pred = self.scores[None, i]
y_true = self.targets[None, i]
metric.update(y_true, y_pred)
tested_metric_result = metric.on_epoch_end()
gt_sklearn_result = precision_score(y_true=self.targets, y_pred=self.scores.argmax(1), average='micro')
self.assertAlmostEqual(tested_metric_result.item(), gt_sklearn_result.item())
def test_on_epoch_end_weighted(self):
metric = PrecisionMeter(num_classes=10, average='weighted')
metric.true_pos = self.true_pos
metric.false_neg = self.false_neg
metric.false_pos = self.false_pos
gt_sklearn_result = precision_score(self.targets, self.scores.argmax(1), average='weighted')
self.assertAlmostEqual(metric.on_epoch_end(), | |
import logging
import os
import pandas as pd
from brownie import Contract, chain, convert, web3
from pandas.core.frame import DataFrame
from requests import get
from ypricemagic import magic
from ypricemagic.utils.utils import Contract_with_erc20_fallback
from eth_abi import encode_single
moralis = 'https://deep-index.moralis.io/api/v2/'
moralis_key = os.environ['MORALIS_KEY']
headers = {"x-api-key": moralis_key}
def walletdataframe(wallet, block):
# NOTE: Moralis API is returning absurd values for token balances,
    # so we will disregard balances returned by the API. We only use
# the API to fetch a list of tokens in the wallet. We then use the
# token list to query correct balances from the blockchain.
url = f'{moralis}{wallet}/erc20'
df = pd.DataFrame(get(url, headers=headers).json())
# NOTE: Remove spam tokens
df = df[~df.token_address.isin(SPAM_TOKENS)]
def getcategory(token_address):
try:
return CATEGORY_MAPPINGS[token_address]
except KeyError:
return
def getbalance(token_address):
logging.debug(f'token: {token_address}')
return Contract_with_erc20_fallback(token_address).balanceOf(wallet, block_identifier=block)
def getprice(token_address):
if token_address == '0x27d22a7648e955e510a40bdb058333e9190d12d4': # PPOOL
return magic.get_price('0x0cec1a9154ff802e7934fc916ed7ca50bde6844e',block)
return magic.get_price(token_address, block)
# NOTE: Add some details
df['wallet'] = wallet
df['wallet_label'] = YEARN_WALLETS[wallet]
df['category'] = df['token_address'].apply(getcategory)
# NOTE: Do some maths
df['balance'] = df['token_address'].apply(getbalance) / 10 ** df['decimals'].apply(int)
df['price'] = df['token_address'].apply(getprice)
df['value'] = df['balance'] * df['price']
# NOTE: Get rid of columns we don't need
df = df.drop(columns=['logo','thumbnail','decimals'])
# NOTE: Get rid of tokens with 0 balance
df = df[df['balance'] != 0]
ethbal = web3.eth.get_balance(convert.to_address(wallet), block_identifier = block)/10 ** 18
if ethbal > 0:
ethprice = magic.get_price('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2', block)
return df.append(pd.DataFrame({
'token_address': [None],
'name': ['Ethereum'],
'symbol': ['ETH'],
'balance': [ethbal],
'category': ['ETH'],
'wallet': [wallet],
'wallet_label': [YEARN_WALLETS[wallet]],
'price': [ethprice],
'value': [ethbal * ethprice],
}))
return df
def dataframe(block):
for wallet in YEARN_WALLETS:
print(wallet)
walletdf = walletdataframe(wallet,block)
try:
df = df.append(walletdf)
except UnboundLocalError:
df = walletdf
def appendmore(token_address,amount,wallet,category=False,wallet_label=False):
token = Contract(token_address)
price = magic.get_price(token_address, block)
if category and not wallet_label:
return df.append(pd.DataFrame({
'token_address': [token.address],
'name': [token.name()],
'symbol': [token.symbol()],
'balance': [amount],
'category': [category],
'wallet': [wallet],
'wallet_label': [YEARN_WALLETS[wallet]],
'price': [price],
'value': [amount * price],
}))
if wallet_label and not category:
return df.append(pd.DataFrame({
'token_address': [token.address],
'name': [token.name()],
'symbol': [token.symbol()],
'balance': [amount],
'category': [CATEGORY_MAPPINGS[token.address]],
'wallet': [wallet],
'wallet_label': [wallet_label],
'price': [price],
'value': [amount * price],
}))
if wallet_label and category:
return df.append(pd.DataFrame({
'token_address': [token.address],
'name': [token.name()],
'symbol': [token.symbol()],
'balance': [amount],
'category': [category],
'wallet': [wallet],
'wallet_label': [wallet_label],
'price': [price],
'value': [amount * price],
}))
return df.append(pd.DataFrame({
'token_address': [token.address],
'name': [token.name()],
'symbol': [token.symbol()],
'balance': [amount],
'category': [CATEGORY_MAPPINGS[token.address]],
'wallet': [wallet],
'wallet_label': [YEARN_WALLETS[wallet]],
'price': [price],
'value': [amount * price],
}))
# NOTE: CDP Collat & Debt
# NOTE: Maker
print('fetching MakerDAO data')
proxy_registry = Contract('0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4')
cdp_manager = Contract('0x5ef30b9986345249bc32d8928B7ee64DE9435E39')
ychad = Contract('0xfeb4acf3df3cdea7399794d0869ef76a6efaff52')
vat = Contract('0x35D1b3F3D7966A1DFe207aa4514C12a259A0492B')
proxy = proxy_registry.proxies(ychad)
cdp = cdp_manager.first(proxy)
urn = cdp_manager.urns(cdp)
ilk = encode_single('bytes32', b'YFI-A')
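# art is the vault's normalized debt and rate is Maker's accumulated stability
# fee multiplier (a ray, 27 decimals); art * rate / 1e27 gives the outstanding
# DAI debt in 18-decimal (wad) units, which is scaled down by 10 ** 18 below.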
art = vat.urns(ilk, urn, block_identifier = block).dict()["art"]
rate = vat.ilks(ilk, block_identifier = block).dict()["rate"]
debt = art * rate / 1e27
if debt > 0:
df = appendmore('0x6b175474e89094c44da98b954eedeac495271d0f', -1 * debt / 10 ** 18,'0xfeb4acf3df3cdea7399794d0869ef76a6efaff52',category='Debt')
ink = vat.urns(ilk, urn, block_identifier = block).dict()["ink"]
if ink > 0:
df = appendmore('0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e', ink / 10 ** 18,urn,wallet_label='Maker')
# NOTE: Unit.xyz
print('fetching Unit.xyz data')
unitVault = Contract("0xb1cff81b9305166ff1efc49a129ad2afcd7bcf19")
debt = unitVault.getTotalDebt('0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e',ychad, block_identifier = block)
if debt > 0:
df = appendmore('0x1456688345527bE1f37E9e627DA0837D6f08C925', -1 * debt / 10 ** 18,'0xfeb4acf3df3cdea7399794d0869ef76a6efaff52',category='Debt')
bal = unitVault.collaterals('0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e',ychad, block_identifier = block)
if bal > 0:
df = appendmore('0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e', bal / 10 ** 18,unitVault.address,wallet_label='Unit.xyz')
# NOTE: This doesn't factor in the unspent balance in Blue Citadel; still need to fetch that
# NOTE: This doesn't factor in bonded KP3R or LP tokens; still need to fetch those
print('fetching KP3R escrow data')
yearnKp3rWallet = "0x5f0845101857d2a91627478e302357860b1598a1"
escrow = Contract("0xf14cb1feb6c40f26d9ca0ea39a9a613428cdc9ca")
kp3rLPtoken = Contract("0xaf988aff99d3d0cb870812c325c588d8d8cb7de8")
bal = escrow.userLiquidityTotalAmount(yearnKp3rWallet,kp3rLPtoken, block_identifier = block)
if bal > 0:
df = appendmore(kp3rLPtoken.address, bal / 10 ** 18,escrow.address,wallet_label='KP3R Escrow')
return df
def main():
#logging.basicConfig(level=logging.DEBUG)
block = chain[-1].number
print(f'querying data at block {block}')
df = dataframe(block)
path = './reports/treasury_balances.csv'
df.to_csv(path, index=False)
print(f'csv exported to {path}')
return df
def allocations(df=None):
if df is None:
block = chain[-1].number
print(f'querying data at block {block}')
df = dataframe(block)
df = df.groupby(['category'])['value'].sum().reset_index()
sumassets = df.loc[df['value'] > 0, 'value'].sum()
df['pct_of_assets'] = df['value'] / sumassets * 100
df = df.sort_values(['pct_of_assets'],ascending=False)
path = './reports/treasury_allocation.csv'
df.to_csv(path, index=False)
print(f'csv exported to {path}')
def all():
df = main()
allocations(df=df)
YEARN_WALLETS = {
'0xb99a40fce04cb740eb79fc04976ca15af69aaaae': 'Treasury V1'
,'0x93a62da5a14c80f265dabc077fcee437b1a0efde': 'Treasury V2'
,'0xfeb4acf3df3cdea7399794d0869ef76a6efaff52': 'Multisig'
,'0x5f0845101857d2a91627478e302357860b1598a1': 'EOA for Kp3r jobs'
}
SPAM_TOKENS = [
'0xa9517b2e61a57350d6555665292dbc632c76adfe'
,'0xb07de4b2989e180f8907b8c7e617637c26ce2776'
,'<KEY>'
,'0x11068577ae36897ffab0024f010247b9129459e6'
,'<KEY>'
,'0xe256cf1c7caeff4383dabafee6dd53910f97213d'
,'0x53d345839e7df5a6c8cf590c5c703ae255e44816'
,'0x830cbe766ee470b67f77ea62a56246863f75f376'
,'<KEY>'
,'0x9694eed198c1b7ab81addaf36255ea58acf13fab'
,'<KEY>'
# Not spam, still disregard for accounting
# ape tax tokens
,'0xf11b141be4d1985e41c3aea99417e27603f67c4c'
,'<KEY>'
]
CATEGORY_MAPPINGS = {
'0xfc1e690f61efd961294b3e1ce3313fbd8aa4f85d': 'Cash & cash equivalents'
,'0x6ee0f7bb50a54ab5253da0667b0dc2ee526c30a8': 'Cash & cash equivalents'
,'0x625ae63000f46200499120b906716420bd059240': 'Cash & cash equivalents'
,'0x6b175474e89094c44da98b954eedeac495271d0f': 'Cash & cash equivalents'
,'0xdac17f958d2ee523a2206206994597c13d831ec7': 'Cash & cash equivalents'
,'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48': 'Cash & cash equivalents'
,'0x9ca85572e6a3ebf24dedd195623f188735a5179f': 'Cash & cash equivalents'
,'0x25212df29073fffa7a67399acefc2dd75a831a1a': 'Cash & cash equivalents'
,'0xf8768814b88281de4f532a3beefa5b85b69b9324': 'Cash & cash equivalents'
,'0x1aef73d49dedc4b1778d0706583995958dc862e6': 'Cash & cash equivalents'
,'0xfd2a8fa60abd58efe3eee34dd494cd491dc14900': 'Cash & cash equivalents'
,'0x3a664ab939fd8482048609f652f9a0b0677337b9': 'Cash & cash equivalents'
,'0xc25a3a3b969415c80451098fa907ec722572917f': 'Cash & cash equivalents'
,'0x845838df265dcd2c412a1dc9e959c7d08537f8a2': 'Cash & cash equivalents'
,'0x39caf13a104ff567f71fd2a4c68c026fdb6e740b': 'Cash & cash equivalents'
,'0x2994529c0652d127b7842094103715ec5299bbed': 'Cash & cash equivalents'
,'0xe2f2a5c287993345a840db3b0845fbc70f5935a5': 'Cash & cash equivalents'
,'0x7da96a3891add058ada2e826306d812c638d87a7': 'Cash & cash equivalents'
,'0x63739d137eefab1001245a8bd1f3895ef3e186e7': 'Cash & cash equivalents'
,'0x5a770dbd3ee6baf2802d29a901ef11501c44797a': 'Cash & cash equivalents'
,'0xa74d4b67b3368e83797a35382afb776baae4f5c8': 'Cash & cash equivalents'
,'0x30fcf7c6cdfc46ec237783d94fc78553e79d4e9c': 'Cash & cash equivalents'
,'0xb4d1be44bff40ad6e506edf43156577a3f8672ec': 'Cash & cash equivalents'
,'0x4962b6c40b5e9433e029c5c423f6b1ce7ff28b0f': 'Cash & cash equivalents'
,'0x84e13785b5a27879921d6f685f041421c7f482da': 'Cash & cash equivalents'
,'0x02d341ccb60faaf662bc0554d13778015d1b285c': 'Cash & cash equivalents'
,'0xd6ea40597be05c201845c0bfd2e96a60bacde267': 'Cash & cash equivalents'
,'0xc116df49c02c5fd147de25baa105322ebf26bd97': 'Cash & cash equivalents'
,'0xa5ca62d95d24a4a350983d5b8ac4eb8638887396': 'Cash & cash equivalents'
,'0x194ebd173f6cdace046c53eacce9b953f28411d1': 'Cash & cash equivalents'
,'0x5f18c75abdae578b483e5f43f12a39cf75b973a9': 'Cash & cash equivalents'
,'0x8cc94ccd0f3841a468184aca3cc478d2148e1757': 'Cash & cash equivalents'
,'0xda816459f1ab5631232fe5e97a05bbbb94970c95': 'Cash & cash equivalents'
,'0x0000000000085d4780b73119b644ae5ecd22b376': 'Cash & cash equivalents'
,'0x94e131324b6054c0d789b190b2dac504e4361b53': 'Cash & cash equivalents'
,'0xb4ada607b9d6b2c9ee07a275e9616b84ac560139': 'Cash & cash equivalents'
,'0x0fcdaedfb8a7dfda2e9838564c5a1665d856afdf': 'Cash & cash equivalents'
,'0x19d3364a399d251e894ac732651be8b0e4e85001': 'Cash & cash equivalents'
,'0x7eb40e450b9655f4b3cc4259bcc731c63ff55ae6': 'Cash & cash equivalents'
,'0xdf5e0e81dff6faf3a7e52ba697820c5e32d806a8': 'Cash & cash equivalents'
,'0x2a38b9b0201ca39b17b460ed2f11e4929559071e': 'Cash & cash equivalents'
,'0x054af22e1519b020516d72d749221c24756385c9': 'Cash & cash equivalents'
,'0x5dbcf33d8c2e976c6b560249878e6f1491bca25c': 'Cash & cash equivalents'
,'0x5fa5b62c8af877cb37031e0a3b2f34a78e3c56a6': 'Cash & cash equivalents'
,'0x8ee57c05741aa9db947a744e713c15d4d19d8822': 'Cash & cash equivalents'
,'0xaf322a2edf31490250fdeb0d712621484b09abb6': 'Cash & cash equivalents'
,'0x6ede7f19df5df6ef23bd5b9cedb651580bdf56ca': 'Cash & cash equivalents'
,'0x6c3f90f043a72fa612cbac8115ee7e52bde6e490': 'Cash & cash equivalents'
,'0x1c6a9783f812b3af3abbf7de64c3cd7cc7d1af44': 'Cash & cash equivalents'
,'0x3b3ac5386837dc563660fb6a0937dfaa5924333b': 'Cash & cash equivalents'
,'0xd2967f45c4f384deea880f807be904762a3dea07': 'Cash & cash equivalents'
,'0xc4daf3b5e2a9e93861c3fbdd25f1e943b8d87417': 'Cash & cash equivalents'
,'0x5b5cfe992adac0c9d48e05854b2d91c73a003858': 'Cash & cash equivalents'
,'0x27b7b1ad7288079a66d12350c828d3c00a6f07d7': 'Cash & cash equivalents'
,'0xdb25f211ab05b1c97d595516f45794528a807ad8': 'Cash & cash equivalents'
,'0x9ba60ba98413a60db4c651d4afe5c937bbd8044b': 'Cash & cash equivalents'
,'0xacd43e627e64355f1861cec6d3a6688b31a6f952': 'Cash & cash equivalents'
,'0x056fd409e1d7a124bd7017459dfea2f387b6d5cd': 'Cash & cash equivalents'
,'0x4f3e8f405cf5afc05d68142f3783bdfe13811522': 'Cash & cash equivalents'
,'0xed279fdd11ca84beef15af5d39bb4d4bee23f0ca': 'Cash & cash equivalents'
,'0x4b5bfd52124784745c1071dcb244c6688d2533d3': 'Cash & cash equivalents'
,'0x873fb544277fd7b977b196a826459a69e27ea4ea': 'Cash & cash equivalents'
,'0xfd0877d9095789caf24c98f7cce092fa8e120775': 'Cash & cash equivalents'
,'0x7158c1bee7a0fa5bd6affc77b2309991d7adcdd4': 'Cash & cash equivalents'
,'0x3b96d491f067912d18563d56858ba7d6ec67a6fa': 'Cash & cash equivalents'
,'0x2dfb14e32e2f8156ec15a2c21c3a6c053af52be8': 'Cash & cash equivalents'
,'0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e': 'YFI'
,'0x0bc529c00c6401aef6d220be8c6ea1667f6ad93e': 'YFI'
,'0xe14d13d8b3b85af791b2aadd661cdbd5e6097db1': 'YFI'
,'0xaa17a236f2badc98ddc0cf999abb47d47fc0a6cf': 'ETH'
,'0xbfedbcbe27171c418cdabc2477042554b1904857': 'ETH'
,'0xac333895ce1a73875cf7b4ecdc5a743c12f3d82b': 'ETH'
,'0xdcd90c7f6324cfa40d7169ef80b12031770b4325': 'ETH'
,'0xa9fe4601811213c340e850ea305481aff02f5b28': 'ETH'
,'0x132d8d2c76db3812403431facb00f3453fc42125': 'ETH'
,'0xa258c4606ca8206d8aa700ce2143d7db854d168c': 'ETH'
,'0x53a901d48795c58f485cbb38df08fa96a24669d5': 'ETH'
,'0x986b4aff588a109c09b50a03f42e4110e29d353f': 'ETH'
,'0xa3d87fffce63b53e0d54faa1cc983b7eb0b74a9c': 'ETH'
,'0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2': 'ETH'
,'0x410e3e86ef427e30b9235497143881f717d93c2a': 'BTC'
,'0x075b1bb99792c9e1041ba13afef80c91a1e70fb3': 'BTC'
,'0xb19059ebb43466c323583928285a49f558e572fd': 'BTC'
,'0x8fa3a9ecd9efb07a8ce90a6eb014cf3c0e3b32ef': 'BTC'
,'0x0e8a7717a4fd7694682e7005957dd5d7598bf14a': 'BTC'
,'0xbf7aa989192b020a8d3e1c65a558e123834325ca': 'BTC'
,'0x8414db07a7f743debafb402070ab01a4e0d2e45e': 'BTC'
,'0x49849c98ae39fff122806c06791fa73784fb3675': 'BTC'
,'0x2fe94ea3d5d4a175184081439753de15aef9d614': 'BTC'
,'0x625b7df2fa8abe21b0a976736cda4775523aed1e': 'BTC'
,'0xde5331ac4b3630f94853ff322b66407e0d6331e8': 'BTC'
,'0xe9dc63083c464d6edccff23444ff3cfc6886f6fb': 'BTC'
,'0x23d3d0f1c697247d5e0a9efb37d8b0ed0c464f7f': 'BTC'
,'0xa696a63cc78dffa1a63e9e50587c197387ff6c7e': 'BTC'
,'0x7047f90229a057c13bf847c0744d646cfb6c9e1a': 'BTC'
,'0x3c5df3077bcf800640b5dae8c91106575a4826e6': 'BTC'
,'0x2260fac5e5542a773aa44fbcfedf7c193bc2c599': 'BTC'
,'0xa64bd6c70cb9051f6a9ba1f163fdc07e0dfb5f84': 'Other short term assets'
,'0x0cec1a9154ff802e7934fc916ed7ca50bde6844e': 'Other short term assets'
,'0xf2db9a7c0acd427a680d640f02d90f6186e71725': 'Other short term assets'
,'0x7356f09c294cb9c6428ac7327b24b0f29419c181': 'Other short term assets'
,'0x3d980e50508cfd41a13837a60149927a11c03731': 'Other short term assets'
,'0x671a912c10bba0cfa74cfc2d6fba9ba1ed9530b2': 'Other short term assets'
,'0xac1c90b9c76d56ba2e24f3995f7671c745f8f308': 'Other short term assets'
,'0x497590d2d57f05cf8b42a36062fa53ebae283498': 'Other short term assets'
,'0xfbeb78a723b8087fd2ea7ef1afec93d35e8bed42': 'Other short term assets'
,'0x6d765cbe5bc922694afe112c140b8878b9fb0390': 'Other short term assets'
,'0xcee60cfa923170e4f8204ae08b4fa6a3f5656f3a': 'Other short term assets'
,'0xe537b5cc158eb71037d4125bdd7538421981e6aa': 'Other short term assets'
,'0xf29ae508698bdef169b89834f76704c3b205aedf': 'Other short term assets'
,'0x56a5fd5104a4956898753dfb060ff32882ae0eb4': 'Other short term assets'
,'0xb8c3b7a2a618c552c23b1e4701109a9e756bab67': 'Other short term assets'
,'0x27eb83254d900ab4f9b15d5652d913963fec35e3': 'Other short term assets'
,'0x27d22a7648e955e510a40bdb058333e9190d12d4': 'Other short term assets'
,'0xca3d75ac011bf5ad07a98d02f18225f9bd9a6bdf': 'Other short term assets'
,'0x4da27a545c0c5b758a6ba100e3a049001de870f5': 'Other short term assets'
,'0x7095472d01a964e50349aa12ce4d5263af77e0d7': 'Other short term assets'
,'0x3a68bc59c500de3d5239b5e7f5bdaa1a3bcabba3': 'Other short term assets'
,'0xc011a73ee8576fb46f5e1c5751ca3b9fe0af2a6f': 'Other short term assets'
,'0x111111111117dc0aa78b770fa6a738034120c302': 'Other short term assets'
,'0x514910771af9ca656af840dff83e8264ecf986ca': 'Other short term assets'
,'0x0d4ea8536f9a13e4fba16042a46c30f092b06aa5': 'Other short term assets'
,'0xd9788f3931ede4d5018184e198699dc6d66c1915': 'Other short term assets'
,'0x4a3fe75762017db0ed73a71c9a06db7768db5e66': 'Other short term assets'
,'0x92e187a03b6cd19cb6af293ba17f2745fd2357d5': 'Other short term assets'
,'0x2ba592f78db6436527729929aaf6c908497cb200': 'Other short term assets'
,'0x090185f2135308bad17527004364ebcc2d37e5f6': 'Other short term assets'
,'0x5a98fcbea516cf06857215779fd812ca3bef1b32': 'Other short term assets'
,'0x63125c0d5cd9071de9a1ac84c400982f41c697ae': 'Other short term assets'
,'0x1ceb5cb57c4d4e2b2433641b95dd330a33185a44': 'Other long term assets'
,'0xaf988afF99d3d0cb870812C325C588D8D8CB7De8': 'Other long term assets'
,'0x1abbac88b8f47d46a3d822efa75f64a15c25966f': 'Junk that somehow has value'
,'0x55a290f08bb4cae8dcf1ea5635a3fcfd4da60456': 'Junk that somehow has value'
,'0xa00c7a61bcbb3f0abcafacd149a3008a153b2dab': 'Junk that somehow has value'
,'0x0a24bb4842c301276c65086b5d78d5c872993c72': | |
r"""
Perform a set operation between the current OID selection set and another
OID selection set, producing a new OID selection set.
:type pSrcSet: :py:class:`GsSelectionSet`
:param pSrcSet: the selection set to combine with
:type eOperation: int
:param eOperation: the type of set operation
:rtype: GsSmarterPtr< GsSelectionSet >
:return: the new selection set
"""
return _gskernel.GsSelectionSet_Combine(self, pSrcSet, eOperation)
__swig_destroy__ = _gskernel.delete_GsSelectionSet
# Register GsSelectionSet in _gskernel:
_gskernel.GsSelectionSet_swigregister(GsSelectionSet)
class GsTileClass(GsGeoDataRoom):
r""" Tile dataset.
\code{.cpp}
GsTilePtr ptrTile = ptrTileClass->CreateTile();
ptrTile->Row(r); ptrTile->Level(l); ptrTile->Col(c);
ptrTile->Store();
GsTileCursorPtr ptrCursor = ptrTileClass->Search();
GsTilePtr ptrTile = ptrCursor->Next();
do{ DoSomeThing(ptrTile); }while(ptrCursor->Next(ptrTile.p));
\endcode"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _gskernel.delete_GsTileClass
def CreateTile(self) -> "GsSmarterPtr< GsTile >":
r"""
Create a new Tile object.
:rtype: GsSmarterPtr< GsTile >
:return: a pointer to the new Tile object
"""
return _gskernel.GsTileClass_CreateTile(self)
def Search(self, *args) -> "GsSmarterPtr< GsTileCursor >":
r"""
*Overload 1:*
Retrieve all tiles.
:rtype: GsSmarterPtr< GsTileCursor >
:return: a tile cursor over the results
|
*Overload 2:*
Retrieve tiles from a start level through an end level.
:type nStartLevel: int
:param nStartLevel: the start level
:type nEndLevel: int
:param nEndLevel: the end level
:rtype: GsSmarterPtr< GsTileCursor >
:return: a tile cursor over the results
|
*Overload 3:*
Retrieve tiles within a row/column range at a given level.
:type nLevel: int
:param nLevel: the level to search
:type nStartRow: int
:param nStartRow: the start row
:type nStartCol: int
:param nStartCol: the start column
:type nEndRow: int
:param nEndRow: the end row
:type nEndCol: int
:param nEndCol: the end column
:rtype: GsSmarterPtr< GsTileCursor >
:return: a tile cursor over the results
"""
return _gskernel.GsTileClass_Search(self, *args)
def Pyramid(self) -> "GsPyramid *":
r"""
Get the pyramid of the tile class.
:rtype: :py:class:`GsPyramid`
:return: a pointer to the pyramid object
"""
return _gskernel.GsTileClass_Pyramid(self)
def ChangePyramid(self, pPyramid: 'GsPyramid') -> "bool":
r"""
Change the pyramid of the tile class.
:type pPyramid: :py:class:`GsPyramid`
:param pPyramid: the pyramid object to apply
:rtype: boolean
:return: whether the change succeeded
"""
return _gskernel.GsTileClass_ChangePyramid(self, pPyramid)
def TileColumnInfo(self) -> "GsTileColumnInfo":
r"""
Get the basic tile metadata.
:rtype: :py:class:`GsTileColumnInfo`
:return: the metadata object
"""
return _gskernel.GsTileClass_TileColumnInfo(self)
def TileCount(self, *args) -> "long long":
r"""
*Overload 1:*
Get the total number of tiles.
:rtype: int
:return: the total tile count
|
*Overload 2:*
Get the number of tiles from a start level through an end level.
:type nStartLevel: int
:param nStartLevel: the start level
:type nEndLevel: int
:param nEndLevel: the end level
:rtype: int
:return: the tile count for those levels
|
*Overload 3:*
Count tiles within a row/column range at a given level.
:type nLevel: int
:param nLevel: the level to search
:type nStartRow: int
:param nStartRow: the start row
:type nStartCol: int
:param nStartCol: the start column
:type nEndRow: int
:param nEndRow: the end row
:type nEndCol: int
:param nEndCol: the end column
:rtype: int
:return: the tile count for that level and range
"""
return _gskernel.GsTileClass_TileCount(self, *args)
def Tile(self, *args) -> "bool":
r"""
*Overload 1:*
Retrieve a single tile by level, row and column.
:type nLevel: int
:param nLevel: the tile level
:type nRow: int
:param nRow: the tile row
:type nCol: int
:param nCol: the tile column
:rtype: GsSmarterPtr< GsTile >
:return: the tile, or empty if it does not exist
|
*Overload 2:*
Retrieve a single tile by level, row and column.
:type nLevel: int
:param nLevel: the tile level
:type nRow: int
:param nRow: the tile row
:type nCol: int
:param nCol: the tile column
:type pTile: :py:class:`GsTile`
:param pTile: tile object that receives the tile data
:rtype: boolean
:return: whether the tile exists
"""
return _gskernel.GsTileClass_Tile(self, *args)
@staticmethod
def CanDowncast(b: 'GsGeoDataRoom') -> "bool":
return _gskernel.GsTileClass_CanDowncast(b)
@staticmethod
def DowncastTo(b: 'GsGeoDataRoom') -> "GsSmarterPtr< GsTileClass >":
return _gskernel.GsTileClass_DowncastTo(b)
# Register GsTileClass in _gskernel:
_gskernel.GsTileClass_swigregister(GsTileClass)
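# A rough Python usage sketch mirroring the C++ example in the GsTileClass
# docstring above. It assumes a GsTileClass instance `tile_class` obtained from
# a data source elsewhere, and that GsTile/GsTileCursor expose Level/Row/Col/
# Store/Next like their C++ counterparts; treat the names as illustrative only.
#
#   tile = tile_class.CreateTile()
#   tile.Level(l); tile.Row(r); tile.Col(c)
#   tile.Store()
#   cursor = tile_class.Search(0, 18)   # Overload 2: tiles from level 0 to 18
#   t = cursor.Next()
#   while t:
#       do_something(t)
#       t = cursor.Next()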
def GsTileClass_CanDowncast(b: 'GsGeoDataRoom') -> "bool":
return _gskernel.GsTileClass_CanDowncast(b)
def GsTileClass_DowncastTo(b: 'GsGeoDataRoom') -> "GsSmarterPtr< GsTileClass >":
return _gskernel.GsTileClass_DowncastTo(b)
class GsTMSTileClass(GsTileClass):
r""" Tile dataset wrapper for a tiled map service (TMS)."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _gskernel.delete_GsTMSTileClass
def TileType(self, *args) -> "void":
r"""
*Overload 1:*
Get the tile type
|
*Overload 2:*
Set the tile type
"""
return _gskernel.GsTMSTileClass_TileType(self, *args)
def UrlTemplate(self, *args) -> "void":
r"""
*Overload 1:*
Get the TMS URL template
|
*Overload 2:*
Set the TMS URL template, e.g. http://xxx.server.com/${Level}/${Row}/${Col}, where ${Level} is the tile level, ${Row} the tile row and ${Col} the tile column
"""
return _gskernel.GsTMSTileClass_UrlTemplate(self, *args)
def Cache(self, *args) -> "void":
r"""
*Overload 1:*
Get the TileClass object used to cache tile data
|
*Overload 2:*
Set the TileClass used to cache tile data
"""
return _gskernel.GsTMSTileClass_Cache(self, *args)
@staticmethod
def CanDowncast(b: 'GsTileClass') -> "bool":
return _gskernel.GsTMSTileClass_CanDowncast(b)
@staticmethod
def DowncastTo(b: 'GsTileClass') -> "GsSmarterPtr< GsTMSTileClass >":
return _gskernel.GsTMSTileClass_DowncastTo(b)
# Register GsTMSTileClass in _gskernel:
_gskernel.GsTMSTileClass_swigregister(GsTMSTileClass)
def GsTMSTileClass_CanDowncast(b: 'GsTileClass') -> "bool":
return _gskernel.GsTMSTileClass_CanDowncast(b)
def GsTMSTileClass_DowncastTo(b: 'GsTileClass') -> "GsSmarterPtr< GsTMSTileClass >":
return _gskernel.GsTMSTileClass_DowncastTo(b)
class GsRowClass(GsDataRoom):
r""" Two-dimensional (attribute) table dataset"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _gskernel.delete_GsRowClass
def Search(self, *args) -> "GsSmarterPtr< GsRowCursor >":
r"""
*Overload 1:*
Query by a filter condition.
:type pFilter: :py:class:`GsQueryFilter`
:param pFilter: the query filter
:rtype: GsSmarterPtr< GsRowCursor >
:return: a row cursor over the query results
|
*Overload 2:*
Query by a WHERE clause.
:type strWhere: string
:param strWhere: the WHERE clause to query with
:rtype: GsSmarterPtr< GsRowCursor >
:return: a row cursor over the query results
"""
return _gskernel.GsRowClass_Search(self, *args)
def Fields(self) -> "GsFields":
r"""
Get the field information of the table.
:rtype: :py:class:`GsFields`
:return: the field information
"""
return _gskernel.GsRowClass_Fields(self)
def CreateRow(self) -> "GsSmarterPtr< GsRow >":
r"""
Create a new row (attribute) object.
:rtype: GsSmarterPtr< GsRow >
:return: a pointer to the new row object
"""
return _gskernel.GsRowClass_CreateRow(self)
def Row(self, *args) -> "bool":
r"""
*Overload 1:*
Get the row object for the given OID.
:rtype: GsSmarterPtr< GsRow >
:return: the row object
|
*Overload 2:*
Get the row object for the given OID.
:type nOID: int
:param nOID: the OID to look up
:type pRow: :py:class:`GsRow`
:param pRow: a non-null row object pointer that receives the attribute data
:rtype: boolean
:return: whether the row object was successfully retrieved
"""
return _gskernel.GsRowClass_Row(self, *args)
def RowCount(self, pFilter: 'GsQueryFilter'=None) -> "long long":
r"""
Get the number of rows that satisfy a query condition.
:type pFilter: :py:class:`GsQueryFilter`
:param pFilter: a spatial or attribute query filter
:rtype: int
:return: the number of rows matching the query
"""
return _gskernel.GsRowClass_RowCount(self, pFilter)
@staticmethod
def CanDowncast(b: 'GsDataRoom') -> "bool":
return _gskernel.GsRowClass_CanDowncast(b)
@staticmethod
def DowncastTo(b: 'GsDataRoom') -> "GsSmarterPtr< GsRowClass >":
return _gskernel.GsRowClass_DowncastTo(b)
# Register GsRowClass in _gskernel:
_gskernel.GsRowClass_swigregister(GsRowClass)
def GsRowClass_CanDowncast(b: 'GsDataRoom') -> "bool":
return _gskernel.GsRowClass_CanDowncast(b)
def GsRowClass_DowncastTo(b: 'GsDataRoom') -> "GsSmarterPtr< GsRowClass >":
return _gskernel.GsRowClass_DowncastTo(b)
class GsFeatureClass(GsGeoDataRoom):
r""" Vector feature class: read/write object for GIS point, line, polygon and annotation vector data"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _gskernel.delete_GsFeatureClass
def Fields(self) -> "GsFields":
r"""
Get the field information of the feature class.
:rtype: :py:class:`GsFields`
:return: the field information
"""
return _gskernel.GsFeatureClass_Fields(self)
def CreateFeature(self) -> "GsSmarterPtr< GsFeature >":
r"""
Create a new feature object.
:rtype: GsSmarterPtr< GsFeature >
:return: a pointer to the new feature object
"""
return _gskernel.GsFeatureClass_CreateFeature(self)
def Feature(self, *args) -> "bool":
r"""
*Overload 1:*
Get the feature object for the given OID.
:rtype: GsSmarterPtr< GsFeature >
:return: the feature object
|
*Overload 2:*
Get the feature object for the given OID.
:type nOID: int
:param nOID: the OID to look up
:type pFea: :py:class:`GsFeature`
:param pFea: a non-null feature object pointer that receives the feature data
:rtype: boolean
:return: whether the feature object was successfully retrieved
"""
return _gskernel.GsFeatureClass_Feature(self, *args)
def Search(self, *args) -> "GsSmarterPtr< GsFeatureCursor >":
r"""
*Overload 1:*
Coarse query by a filter condition.
:type pFilter: :py:class:`GsQueryFilter`
:param pFilter: the query filter, either a GsSpatialQueryFilter or a GsQueryFilter
:rtype: GsSmarterPtr< GsFeatureCursor >
:return: a feature cursor over the query results
|
*Overload 2:*
Coarse query by a geometric spatial extent.
:type pGeo: :py:class:`GsGeometry`
:param pGeo: the spatial extent to search
:rtype: GsSmarterPtr< GsFeatureCursor >
:return: a feature cursor over the query results
|
*Overload 3:*
Coarse query by a WHERE clause.
:type strWhere: string
:param strWhere: the WHERE clause to query with
:rtype: GsSmarterPtr< GsFeatureCursor >
:return: a feature cursor over the query results
|
*Overload 4:*
Coarse query by both a WHERE clause and a spatial extent.
:type pGeo: :py:class:`GsGeometry`
:param pGeo: the spatial extent to search
:type strWhere: string
:param strWhere: the WHERE clause to query with
:rtype: GsSmarterPtr< GsFeatureCursor >
:return: a feature cursor over the query results
"""
return _gskernel.GsFeatureClass_Search(self, *args)
def GeometryType(self) -> "GsGeometryType":
r"""
Geometry type of the feature class.
:rtype: int
:return: the geometry type of the feature class
"""
return _gskernel.GsFeatureClass_GeometryType(self)
def CreateSpatialIndex(self) -> "bool":
r"""
Create the spatial index.
:rtype: boolean
:return: whether index creation succeeded
"""
return _gskernel.GsFeatureClass_CreateSpatialIndex(self)
def DeleteSpatialIndex(self) -> "bool":
r"""
Delete the spatial index.
:rtype: boolean
:return: whether index deletion succeeded
"""
return _gskernel.GsFeatureClass_DeleteSpatialIndex(self)
def HasSpatialIndex(self) -> "bool":
r"""
Whether a spatial index exists.
:rtype: boolean
:return: whether the spatial index exists
"""
return _gskernel.GsFeatureClass_HasSpatialIndex(self)
def Select(self, pFilter: 'GsQueryFilter'=None) -> "GsSmarterPtr< GsSelectionSet >":
r"""
Select features by a WHERE clause and/or spatial extent.
:rtype: GsSmarterPtr< GsSelectionSet >
:return: the resulting selection set
"""
return _gskernel.GsFeatureClass_Select(self, pFilter)
def GeometryColumnInfo(self) -> "GsGeometryColumnInfo":
r"""
Get the basic geometry column information.
:rtype: :py:class:`GsGeometryColumnInfo`
:return: the geometry column information object
"""
return _gskernel.GsFeatureClass_GeometryColumnInfo(self)
def FeatureCount(self, pFilter: 'GsQueryFilter'=None) -> "long long":
r"""
Get the number of features that satisfy a query condition.
:type pFilter: :py:class:`GsQueryFilter`
:param pFilter: a spatial or attribute query filter
:rtype: int
:return: the number of features matching the query
"""
return _gskernel.GsFeatureClass_FeatureCount(self, pFilter)
@staticmethod
def CanDowncast(b: 'GsGeoDataRoom') -> "bool":
return _gskernel.GsFeatureClass_CanDowncast(b)
@staticmethod
def DowncastTo(b: 'GsGeoDataRoom') -> "GsSmarterPtr< GsFeatureClass >":
return _gskernel.GsFeatureClass_DowncastTo(b)
# Register GsFeatureClass in _gskernel:
_gskernel.GsFeatureClass_swigregister(GsFeatureClass)
def GsFeatureClass_CanDowncast(b: 'GsGeoDataRoom') -> "bool":
return _gskernel.GsFeatureClass_CanDowncast(b)
def GsFeatureClass_DowncastTo(b: 'GsGeoDataRoom') -> "GsSmarterPtr< GsFeatureClass >":
return _gskernel.GsFeatureClass_DowncastTo(b)
class GsProxyFeatureClassIO(GsRefObject):
r""" Proxy feature class callback"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
if self.__class__ == GsProxyFeatureClassIO:
_self = None
else:
_self = self
_gskernel.GsProxyFeatureClassIO_swiginit(self, _gskernel.new_GsProxyFeatureClassIO(_self, ))
__swig_destroy__ = _gskernel.delete_GsProxyFeatureClassIO
def Search(self, pFilter: 'GsQueryFilter') -> "GsFeatureDataIO *":
r""" Search a feature class and return a data IO object"""
return _gskernel.GsProxyFeatureClassIO_Search(self, pFilter)
def __disown__(self):
self.this.disown()
_gskernel.disown_GsProxyFeatureClassIO(self)
return weakref.proxy(self)
# Register GsProxyFeatureClassIO in _gskernel:
_gskernel.GsProxyFeatureClassIO_swigregister(GsProxyFeatureClassIO)
class GsProxyFeatureClass(GsFeatureClass):
r""" Lets a user expose an arbitrary data source as a feature class; by implementing a simple callback interface, arbitrary data can be read as features"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, name: 'char const *'):
_gskernel.GsProxyFeatureClass_swiginit(self, _gskernel.new_GsProxyFeatureClass(name))
__swig_destroy__ = _gskernel.delete_GsProxyFeatureClass
def DataIO(self, *args) -> "GsProxyFeatureClassIO *":
r"""
*Overload 1:*
Set the data callback IO object
|
*Overload 2:*
Get the data callback IO object
"""
return _gskernel.GsProxyFeatureClass_DataIO(self, *args)
def Fields(self, *args) -> "GsFields":
r"""
*Overload 1:*
Set the field information of the feature class.
:rtype: void
|
*Overload 2:*
Get the field information of the feature class.
:rtype: :py:class:`GsFields`
:return: the field information
"""
return _gskernel.GsProxyFeatureClass_Fields(self, *args)
def CreateFeature(self) -> "GsSmarterPtr< GsFeature >":
r"""
Create a new feature object.
:rtype: GsSmarterPtr< GsFeature >
:return: a pointer to the new feature object
"""
return _gskernel.GsProxyFeatureClass_CreateFeature(self)
def Feature(self, *args) -> "bool":
r"""
*Overload 1:*
Get the feature object for the given OID.
:rtype: GsSmarterPtr< GsFeature >
:return: the feature object
|
*Overload 2:*
Get the feature object for the given OID.
:type nOID: int
:param nOID: the OID to look up
:type pFea: :py:class:`GsFeature`
:param pFea: a non-null feature object pointer that receives the feature data
:rtype: boolean
:return: whether the feature object was successfully retrieved
"""
return _gskernel.GsProxyFeatureClass_Feature(self, *args)
def Search(self, pFilter: 'GsQueryFilter'=None) -> "GsSmarterPtr< GsFeatureCursor >":
r"""
Coarse query by a filter condition.
:type pFilter: :py:class:`GsQueryFilter`
:param pFilter: the query filter, either a GsSpatialQueryFilter or a GsQueryFilter
:rtype: GsSmarterPtr< GsFeatureCursor >
:return: a feature cursor over the query results
"""
return _gskernel.GsProxyFeatureClass_Search(self, pFilter)
def GeometryType(self) -> "GsGeometryType":
r"""
Geometry type of the feature class.
:rtype: int
:return: the geometry type of the feature class
"""
return _gskernel.GsProxyFeatureClass_GeometryType(self)
def CreateSpatialIndex(self) -> "bool":
r"""
Create the spatial index.
:rtype: boolean
:return: whether index creation succeeded
"""
return _gskernel.GsProxyFeatureClass_CreateSpatialIndex(self)
def DeleteSpatialIndex(self) -> "bool":
r"""
Delete the spatial index.
:rtype: boolean
:return: whether index deletion succeeded
"""
return _gskernel.GsProxyFeatureClass_DeleteSpatialIndex(self)
def HasSpatialIndex(self) -> "bool":
r"""
Whether a spatial index exists.
:rtype: boolean
:return: whether the spatial index exists
"""
return _gskernel.GsProxyFeatureClass_HasSpatialIndex(self)
def Select(self, | |
"""Utilities for running MRC
This includes some utilities for MRC problems, including an iterable PyTorch loader that doesnt require examples
to fit in core memory.
"""
import numpy as np
from eight_mile.utils import Offsets, Average, listify
from eight_mile.pytorch.layers import WithDropout, Dense
from baseline.utils import get_model_file, get_metric_cmp
from baseline.reader import register_reader
from baseline.model import register_model
import torch
import six
from torch.utils.data import SequentialSampler, DataLoader
from torch.utils.data.dataset import IterableDataset, TensorDataset
import collections
import json
import torch.nn as nn
import logging
from eight_mile.progress import create_progress_bar
import string
import re
import os
from eight_mile.pytorch.optz import OptimizerManager
from baseline.train import register_training_func, register_trainer, EpochReportingTrainer, create_trainer
from baseline.model import create_model_for
from typing import List
from mead.tasks import Task, Backend, register_task
from mead.utils import read_config_file_or_json, index_by_label, print_dataset_info
from eight_mile.downloads import DataDownloader
import math
import regex
logger = logging.getLogger('baseline')
# Use for GPT2, RoBERTa, Longformer
BPE_PATTERN = regex.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def convert_tokens_to_ids_if(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
id = vocab.get(item, Offsets.UNK)
output.append(id)
if id == Offsets.UNK:
logger.warning(f"Invalid vocab item. Treating as UNK: [{item}]")
return output
def bpe_tokenize(s, strip_ws=True):
s_out = regex.findall(BPE_PATTERN, s)
return s_out if not strip_ws else [w.strip() for w in s_out]
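# Illustrative example of the GPT-2 style pre-tokenization above, with
# strip_ws=True: bpe_tokenize("Hello world, it's fine")
# -> ['Hello', 'world', ',', 'it', "'s", 'fine']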
def bu_tokenize(s, strip_ws=True):
import toky
s_out = [s.get_text() for s in toky.bu_assembly(s)]
return [s for s in s_out if s not in ['<', '>']]
def whitespace_tokenize(s, strip_ws=True):
return s.split()
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
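# Example: _compute_softmax([1.0, 2.0]) -> [0.2689..., 0.7310...], and
# _compute_softmax([]) -> [].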
class MRCExample:
"""Intermediate object that holds a single QA sample, this gets converted to features later
"""
def __init__(self,
qas_id,
query_item,
doc_tokens=None,
answers=None,
#orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None):
self.qas_id = qas_id
self.query_item = query_item
self.doc_tokens = doc_tokens
#self.orig_answer_text = orig_answer_text
self.answers = answers
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = f"qas_id: {self.qas_id}, query_item: {self.query_item}\ncontext_item: {' '.join(self.doc_tokens)}"
if self.start_position:
s += f"\nstart_position: {self.start_position}"
if self.start_position:
s += f"\nend_position: {self.end_position}"
if self.start_position:
s += f"\nis_impossible: {self.is_impossible}"
return s
class InputFeatures:
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
# self.span_position = span_position
self.is_impossible = is_impossible
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
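# Worked example with hypothetical spans: given
#   DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
#   spans = [DocSpan(0, 5), DocSpan(3, 5)]
# position 4 has 4 left / 0 right context in spans[0] (min 0) but 1 left /
# 3 right in spans[1] (min 1), so _check_is_max_context(spans, 1, 4) is True
# and _check_is_max_context(spans, 0, 4) is False.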
def read_examples(input_file, is_training):
"""Read SQuaD style formatted examples, both v1.1 and v2
For v1.1, the is_impossible field is absent, so default that to False here to support both.
:param input_file:
:param is_training:
:return:
"""
examples = []
with open(input_file, "r") as f:
input_data = json.load(f)['data']
pg = create_progress_bar(len(input_data))
for entry in pg(input_data):
for paragraph in entry['paragraphs']:
paragraph_text = paragraph['context']
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph['qas']:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
#orig_answer_text = None
is_impossible = False
if is_training:
is_impossible = bool(qa.get('is_impossible', False))
# The dev set has more than one example possibly
# The BERT code raises an error, which makes sense when eval is offline
if not is_impossible:
all_answers = []
skip_example = False
for ai, answer in enumerate(qa['answers']):
# For training we have a single answer, for dev the scoring takes into account all answers
# so in order to do this the way we want with inline eval we need to handle this, right now
# our scores are too low because we only use the first
if ai == 0:
orig_answer_text = answer['text']
answer_offset = answer['answer_start']
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
actual_text = ' '.join(
doc_tokens[start_position:(end_position + 1)]
)
cleaned_answer_text = ' '.join(whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
skip_example = True
break
if answer['text'] not in all_answers:
all_answers.append(answer['text'])
if skip_example:
continue
# This should only happen outside of mead-train for offline evaluation
else:
start_position = -1
end_position = -1
all_answers = []
example = MRCExample(qas_id,
question_text,
doc_tokens,
all_answers,
start_position,
end_position,
is_impossible)
examples.append(example)
return examples
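# For reference, read_examples expects SQuAD-style JSON shaped roughly like:
# {"data": [{"paragraphs": [{"context": "...",
#                            "qas": [{"id": "...", "question": "...",
#                                     "is_impossible": false,
#                                     "answers": [{"text": "...", "answer_start": 0}]}]}]}]}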
class MRCDatasetIterator(IterableDataset):
def __init__(self, input_file, vectorizer, mxlen=384, has_impossible=True, is_training=True, doc_stride=128, mxqlen=64,
shuffle=True, tok_type=None, strip_ws=True):
super().__init__()
self.vectorizer = vectorizer
self.CLS_TOKEN = '[CLS]'
if '<EOU>' in self.vectorizer.vocab:
self.EOU_TOKEN = '<EOU>'
elif '[SEP]' in self.vectorizer.vocab:
self.EOU_TOKEN = '[SEP]'
else:
self.EOU_TOKEN = Offsets.VALUES[Offsets.EOS]
print('SEP token', self.EOU_TOKEN)
self.input_file = input_file
self.mxlen = mxlen
self.doc_stride = doc_stride
self.mxqlen = mxqlen
self.has_impossible = has_impossible
self.is_training = is_training
self.tokenizer_fn = whitespace_tokenize
if tok_type == 'pretok' or tok_type == 'bpe':
logger.warning("Doing GPT-style pre-tokenization. This may not be necessary for WordPiece vectorizers")
self.tokenizer_fn = bpe_tokenize
elif tok_type == 'toky':
logger.warning("Doing toky tokenization.")
self.tokenizer_fn = bu_tokenize
self.strip_ws = strip_ws
if self.strip_ws:
logger.warning("Stripping leading whitespace on tokens. This may not be required for GPT*, RoBERTa or variants")
self.examples = read_examples(input_file, is_training)
# Add a tokenized version to all examples
for example in self.examples:
example.answers = [' '.join(self.tokenize(a)) for a in example.answers]
self.shuffle = shuffle
def _improve_answer_span(self, doc_tokens, input_start, input_end, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was <NAME> born?
# Context: The leader was <NAME> (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
# TODO: For right now we don't need this because we will always be tokenizing our orig_answer_text
##tok_answer_text = " ".join(self.tokenize(orig_answer_text))
| |
__all__ = [
"eval_nls",
"ev_nls",
"eval_min",
"ev_min",
]
from grama import add_pipe, pipe, custom_formatwarning, df_make
from grama import eval_df, eval_nominal, eval_monte_carlo
from grama import comp_marginals, comp_copula_independence
from grama import tran_outer
from numpy import Inf, isfinite
from numpy.random import seed as setseed
from pandas import DataFrame, concat
from scipy.optimize import minimize
from toolz import curry
## Nonlinear least squares
# --------------------------------------------------
@curry
def eval_nls(
model,
df_data=None,
out=None,
var_fix=None,
df_init=None,
append=False,
tol=1e-6,
ftol=1e-9,
gtol=1e-5,
n_maxiter=100,
n_restart=1,
method="L-BFGS-B",
seed=None,
verbose=True,
):
r"""Estimate with Nonlinear Least Squares (NLS)
Estimate best-fit variable levels with nonlinear least squares (NLS).
Args:
model (gr.Model): Model to analyze. All model variables
selected for fitting must be bounded or random. Deterministic
variables may have semi-infinite bounds.
df_data (DataFrame): Data for estimating parameters. Variables not
found in df_data optimized in fitting.
out (list or None): Output contributions to consider in computing MSE.
Assumed to be model.out if left as None.
var_fix (list or None): Variables to fix to nominal levels. Note that
variables with domain width zero will automatically be fixed.
df_init (DataFrame): Initial guesses for parameters; overrides n_restart
append (bool): Append metadata? (Initial guess, MSE, optimizer status)
tol (float): Optimizer convergence tolerance
ftol (float): Optimizer function-value tolerance (passed to the optimizer options)
gtol (float): Optimizer projected-gradient tolerance (passed to the optimizer options)
n_maxiter (int): Optimizer maximum iterations
n_restart (int): Number of restarts; beyond n_restart=1 random
restarts are used.
method (str): Optimization method; see scipy.optimize.minimize for options
seed (int OR None): Random seed for restarts
verbose (bool): Print messages to console?
Returns:
DataFrame: Results of estimation
Examples:
>>> import grama as gr
>>> from grama.data import df_trajectory_full
>>> from grama.models import make_trajectory_linear
>>>
>>> md_trajectory = make_trajectory_linear()
>>>
>>> df_fit = (
>>> md_trajectory
>>> >> gr.ev_nls(df_data=df_trajectory_full)
>>> )
>>>
>>> print(df_fit)
"""
## Check `out` invariants
if out is None:
out = model.out
if verbose:
print("... eval_nls setting out = {}".format(out))
set_diff = set(out).difference(set(df_data.columns))
if len(set_diff) > 0:
raise ValueError(
"out must be subset of df_data.columns\n"
+ "difference = {}".format(set_diff)
)
## Determine variables to be fixed
if var_fix is None:
var_fix = set()
else:
var_fix = set(var_fix)
for var in model.var_det:
wid = model.domain.get_width(var)
if wid == 0:
var_fix.add(var)
if verbose:
print("... eval_nls setting var_fix = {}".format(list(var_fix)))
## Determine variables for evaluation
var_feat = set(model.var).intersection(set(df_data.columns))
if verbose:
print("... eval_nls setting var_feat = {}".format(list(var_feat)))
## Determine variables for fitting
var_fit = set(model.var).difference(var_fix.union(var_feat))
if len(var_fit) == 0:
raise ValueError(
"No var selected for fitting!\n"
+ "Try checking model bounds and df_data.columns."
)
## Separate var_fit into det and rand
var_fit_det = list(set(model.var_det).intersection(var_fit))
var_fit_rand = list(set(model.var_rand).intersection(var_fit))
## Construct bounds, fix var_fit order
var_fit = var_fit_det + var_fit_rand
bounds = []
var_prob = []
for var in var_fit_det:
if not isfinite(model.domain.get_nominal(var)):
var_prob.append(var)
bounds.append(model.domain.get_bound(var))
if len(var_prob) > 0:
raise ValueError(
"all variables to be fitted must finite nominal value\n"
+ "offending var = {}".format(var_prob)
)
for var in var_fit_rand:
bounds.append(
(model.density.marginals[var].q(0), model.density.marginals[var].q(1),)
)
## Determine initial guess points
df_nom = eval_nominal(model, df_det="nom", skip=True)
## Use specified initial guess(es)
if not (df_init is None):
# Check invariants
set_diff = set(var_fit).difference(set(df_init.columns))
if len(set_diff) > 0:
raise ValueError(
"var_fit must be subset of df_init.columns\n"
+ "difference = {}".format(set_diff)
)
# Pull n_restart
n_restart = df_init.shape[0]
## Generate initial guess(es)
else:
df_init = df_nom[var_fit]
if n_restart > 1:
if not (seed is None):
setseed(seed)
## Collect sweep-able deterministic variables
var_sweep = list(
filter(
lambda v: isfinite(model.domain.get_width(v))
& (model.domain.get_width(v) > 0),
model.var_det,
)
)
## Generate pseudo-marginals
dicts_var = {}
for v in var_sweep:
dicts_var[v] = {
"dist": "uniform",
"loc": model.domain.get_bound(v)[0],
"scale": model.domain.get_width(v),
}
## Overwrite model
md_sweep = comp_marginals(model, **dicts_var)
md_sweep = comp_copula_independence(md_sweep)
## Generate random start points
df_rand = eval_monte_carlo(
md_sweep, n=n_restart - 1, df_det="nom", skip=True,
)
df_init = concat((df_init, df_rand[var_fit]), axis=0).reset_index(drop=True)
## Iterate over initial guesses
df_res = DataFrame()
for i in range(n_restart):
x0 = df_init[var_fit].iloc[i].values
## Build evaluator
def objective(x):
"""x = [var_fit]"""
## Evaluate model
df_var = tran_outer(
df_data[var_feat],
concat(
(df_nom[var_fix].iloc[[0]], df_make(**dict(zip(var_fit, x)))),
axis=1,
),
)
df_tmp = eval_df(model, df=df_var)
## Compute joint MSE
return ((df_tmp[out].values - df_data[out].values) ** 2).mean()
## Run optimization
res = minimize(
objective,
x0,
args=(),
method=method,
jac=False,
tol=tol,
options={"maxiter": n_maxiter, "disp": False, "ftol": ftol, "gtol": gtol,},
bounds=bounds,
)
## Package results
df_tmp = df_make(
**dict(zip(var_fit, res.x)),
**dict(zip(map(lambda s: s + "_0", var_fit), x0)),
)
df_tmp["success"] = [res.success]
df_tmp["message"] = [res.message]
df_tmp["n_iter"] = [res.nit]
df_tmp["mse"] = [res.fun]
df_res = concat((df_res, df_tmp,), axis=0,).reset_index(drop=True)
## Post-process
if append:
return df_res
else:
return df_res[var_fit]
ev_nls = add_pipe(eval_nls)
## Minimize
# --------------------------------------------------
@curry
def eval_min(
model,
out_min=None,
out_geq=None,
out_leq=None,
out_eq=None,
method="SLSQP",
tol=1e-6,
n_restart=1,
n_maxiter=50,
seed=None,
df_start=None,
):
r"""Constrained minimization using functions from a model
Perform constrained minimization using functions from a model. Model must
have deterministic variables only.
Wrapper for scipy.optimize.minimize
Args:
model (gr.Model): Model to analyze. All model variables must be
deterministic.
out_min (str): Output to use as minimization objective.
out_geq (None OR list of str): Outputs to use as geq constraints; var >= 0
out_leq (None OR list of str): Outputs to use as leq constraints; var <= 0
out_eq (None OR list of str): Outputs to use as equality constraints; var == 0
method (str): Optimization method; see the documentation for
scipy.optimize.minimize for options.
tol (float): Optimization objective convergence tolerance
n_restart (int): Number of restarts; beyond n_restart=1 random
restarts are used.
n_maxiter (int): Optimizer maximum iterations
seed (int OR None): Random seed for restarts
df_start (None or DataFrame): Specific starting values to use; overrides
n_restart if provided.
Returns:
DataFrame: Results of optimization
Examples:
>>> import grama as gr
>>> md = (
>>> gr.Model("Constrained Rosenbrock")
>>> >> gr.cp_function(
>>> fun=lambda x: (1 - x[0])**2 + 100*(x[1] - x[0]**2)**2,
>>> var=["x", "y"],
>>> out=["c"],
>>> )
>>> >> gr.cp_function(
>>> fun=lambda x: (x[0] - 1)**3 - x[1] + 1,
>>> var=["x", "y"],
>>> out=["g1"],
>>> )
>>> >> gr.cp_function(
>>> fun=lambda x: x[0] + x[1] - 2,
>>> var=["x", "y"],
>>> out=["g2"],
>>> )
>>> >> gr.cp_bounds(
>>> x=(-1.5, +1.5),
>>> y=(-0.5, +2.5),
>>> )
>>> )
>>> md >> gr.ev_min(
>>> out_min="c",
>>> out_leq=["g1", "g2"]
>>> )
"""
## Check that model has only deterministic variables
if model.n_var_rand > 0:
raise ValueError("model must have no random variables")
## Check that objective is in model
if not (out_min in model.out):
raise ValueError("model must contain out_min")
## Check that constraints are in model
if not (out_geq is None):
out_diff = set(out_geq).difference(set(model.out))
if len(out_diff) > 0:
raise ValueError(
"model must contain each out_geq; missing {}".format(out_diff)
)
if not (out_leq is None):
out_diff = set(out_leq).difference(set(model.out))
if len(out_diff) > 0:
raise ValueError(
"model must contain each out_leq; missing {}".format(out_diff)
)
if not (out_eq is None):
out_diff = set(out_eq).difference(set(model.out))
if len(out_diff) > 0:
raise ValueError(
"model must contain each out_eq; missing {}".format(out_diff)
)
## Formulate initial guess
df_nom = eval_nominal(model, df_det="nom", skip=True)
if df_start is None:
df_start = df_nom[model.var]
if n_restart > 1:
if not (seed is None):
setseed(seed)
## Collect sweep-able deterministic variables
var_sweep = list(
filter(
lambda v: isfinite(model.domain.get_width(v))
& (model.domain.get_width(v) > 0),
model.var_det,
)
)
## Generate pseudo-marginals
dicts_var = {}
for v in var_sweep:
dicts_var[v] = {
"dist": "uniform",
"loc": model.domain.get_bound(v)[0],
"scale": model.domain.get_width(v),
}
## Overwrite model
md_sweep = comp_marginals(model, **dicts_var)
md_sweep = comp_copula_independence(md_sweep)
## Generate random start points
df_rand = eval_monte_carlo(
md_sweep, n=n_restart - 1, df_det="nom", skip=True,
)
df_start = concat((df_start, df_rand[model.var]), axis=0).reset_index(
drop=True
)
else:
n_restart = df_start.shape[0]
## Factory for wrapping model's output
def make_fun(out, sign=+1):
def fun(x):
df = DataFrame([x], columns=model.var)
df_res = eval_df(model, df)
return sign * df_res[out]
return fun
## Create helper functions for constraints
constraints = []
if not (out_geq is None):
for out in out_geq:
constraints.append(
{"type": "ineq", "fun": make_fun(out),}
)
if not (out_leq is None):
for out in out_leq:
constraints.append(
{"type": "ineq", "fun": make_fun(out, sign=-1),}
)
if not (out_eq is None):
for out in out_eq:
constraints.append(
{"type": "eq", "fun": make_fun(out),}
)
## Parse the bounds for minimize
bounds = list(map(lambda k: model.domain.bounds[k], model.var))
## Run optimization
df_res = DataFrame()
for i in range(n_restart):
x0 = df_start[model.var].iloc[i].values
res = minimize(
make_fun(out_min),
x0,
args=(),
method=method,
jac=False,
tol=tol,
options={"maxiter": n_maxiter, "disp": False},
constraints=constraints,
bounds=bounds,
)
df_opt = df_make(
**dict(zip(model.var, res.x)),
**dict(zip(map(lambda s: s + "_0", model.var), x0)),
)
| |
from datetime import datetime, timezone
from typing import Union, List, Dict, Tuple
from .covidstatistics import *
from .exceptions import NotFound, BadSortParameter, BadYesterdayParameter, BadTwoDaysAgoParameter, BadAllowNoneParameter
from .covidendpoints import *
class Covid:
"""
Handles interactions with the Open Disease API's COVID-19 data.
"""
def __init__(self, api_url, request_client):
self.api_url = api_url
self.request_client = request_client
def _check_sort(self, sort):
if sort not in ['updated', 'country', 'countryInfo', 'cases', 'todayCases', 'deaths', 'todayDeaths', 'recovered',
'todayRecovered', 'active', 'critical', 'casesPerOneMillion', 'deathsPerOneMillion', 'tests',
'testsPerOneMillion', 'population', 'continent', 'oneCasePerPeople', 'oneDeathPerPeople',
'oneTestPerPeople', 'activePerOneMillion', 'recoveredPerOneMillion', 'criticalPerOneMillion']:
raise BadSortParameter('Invalid sort parameter.')
def _check_yesterday(self, value):
if not isinstance(value, bool):
raise BadYesterdayParameter('Value for yesterday should either be True or False.')
def _check_two_days_ago(self, value):
if not isinstance(value, bool):
raise BadTwoDaysAgoParameter('Value for two_days_ago should either be True or False.')
def _check_allow_none(self, value):
if not isinstance(value, bool):
raise BadAllowNoneParameter('Value for allow_none should either be True or False.')
def _compile_today(self, data):
return Today(
data.get('todayCases'),
data.get('todayDeaths'),
data.get('todayRecovered')
)
def _compile_permillion(self, data):
return PerMillion(
data.get('casesPerOneMillion'),
data.get('deathsPerOneMillion'),
data.get('testsPerOneMillion'),
data.get('activePerOneMillion'),
data.get('recoveredPerOneMillion'),
data.get('criticalPerOneMillion')
)
def _compile_perpeople(self, data):
return PerPeople(
data.get('oneCasePerPeople'),
data.get('oneDeathPerPeople'),
data.get('oneTestPerPeople')
)
def _compile_statetoday(self, data):
return StateToday(
data.get('todayCases'),
data.get('todayDeaths')
)
def _compile_statepermillion(self, data):
return StatePerMillion(
data.get('casesPerOneMillion'),
data.get('deathsPerOneMillion'),
data.get('testsPerOneMillion')
)
def _compile_countryInfo(self, countryInfo):
_id = countryInfo.get("_id")
iso2 = countryInfo.get("iso2")
iso3 = countryInfo.get("iso3")
_lat = countryInfo.get("lat")
_long = countryInfo.get("long")
flag = countryInfo.get("flag")
info = CountryInfo(
_id,
iso2,
iso3,
_lat,
_long,
flag
)
return info
def _compile_country_data(self, country_stats):
country_name = country_stats.get("country")
total_country_cases = country_stats.get("cases", 0)
total_country_deaths = country_stats.get("deaths", 0)
total_country_recoveries = country_stats.get("recovered", 0)
today = self._compile_today(country_stats)
total_critical = country_stats.get("critical", 0)
active = country_stats.get("active", 0)
tests = country_stats.get("tests", 0)
per_million = self._compile_permillion(country_stats)
per_people = self._compile_perpeople(country_stats)
continent = country_stats.get("continent")
population = country_stats.get("population", 0)
updated_epoch = country_stats.get("updated", 0)
updated = datetime.utcfromtimestamp(updated_epoch/1000.0)
countryInfo = country_stats["countryInfo"]
info = self._compile_countryInfo(countryInfo)
return Country(
info,
country_name,
total_country_cases,
total_country_deaths,
total_country_recoveries,
today,
total_critical,
active,
tests,
per_million,
per_people,
continent,
population,
updated
)
def _compile_state(self, state_dict):
state_name = state_dict.get("state")
total_state_cases = state_dict.get("cases", 0)
total_state_deaths = state_dict.get("deaths", 0)
today = self._compile_statetoday(state_dict)
active = state_dict.get("active", 0)
tests = state_dict.get("tests", 0)
per_million = self._compile_statepermillion(state_dict)
state_stats = State(
state_name,
total_state_cases,
total_state_deaths,
today,
active,
tests,
per_million
)
return state_stats
def _generate_history(self, historical_stats, is_county=False):
case_history = []
death_history = []
recovery_history = [] if not is_county else None
if not is_county:
country_name = historical_stats.get("country", "Global")
province_name = historical_stats.get("province")
else:
country_name = historical_stats.get("province")
province_name = historical_stats.get("county")
if "timeline" not in historical_stats: #if country was 'all'
d = historical_stats
else:
d = historical_stats["timeline"]
for date in list(d["cases"].keys()): #pass on all historical data. let the client decide how much of it they want
_d = datetime.strptime(date, "%m/%d/%y")
case_history.append(HistoryEntry(_d, d["cases"][date]))
death_history.append(HistoryEntry(_d, d["deaths"][date]))
if not is_county:
recovery_history.append(HistoryEntry(_d, d["recovered"][date]))
his = History(
case_history,
death_history,
recovery_history
)
return Historical(
country_name,
province_name,
his
)
def _compile_jhu_data(self, matching_county):
country = matching_county.get("country") #will always be 'US'
province = matching_county.get("province")
county_name = matching_county.get("county")
confirmed_cases = matching_county["stats"].get("confirmed")
deaths = matching_county["stats"].get("deaths")
recoveries = matching_county["stats"].get("recovered")
_lat = float(matching_county["coordinates"].get("latitude")) if matching_county["coordinates"].get("latitude") else 0.0
_long = float(matching_county["coordinates"].get("longitude")) if matching_county["coordinates"].get("longitude") else 0.0
updated = datetime.strptime(matching_county.get('updatedAt'), '%Y-%m-%d %H:%M:%S')
stat = JhuCsse(
country,
province,
county_name,
updated,
confirmed_cases,
deaths,
recoveries,
_lat,
_long
)
return stat
def _compile_continent(self, data):
name = data.get('continent')
countries = data.get('countries')
cases = data.get("cases", 0)
deaths = data.get("deaths", 0)
recoveries = data.get("recovered", 0)
today = self._compile_today(data)
critical = data.get("critical", 0)
updated_epoch = data.get("updated", 0)
active = data.get("active", cases-deaths-recoveries)
tests = data.get("tests", 0)
per_million = self._compile_permillion(data)
population = data.get("population", 0)
updated = datetime.utcfromtimestamp(updated_epoch/1000.0)
return Continent(
name,
countries,
cases,
deaths,
recoveries,
critical,
active,
tests,
today,
per_million,
population,
updated
)
def _compile_state_list(self, data):
return [self._compile_nyt_state(d) for d in data]
def _compile_county_list(self, data):
return [self._compile_nyt_county(d) for d in data]
def _compile_nyt_state(self, data):
date = data.get('date')
state = data.get('state')
fips = data.get('fips')
cases = data.get('cases')
deaths = data.get('deaths')
if date:
date = datetime.strptime(date, "%Y-%m-%d")
return NewYorkTimesState(
date,
state,
fips,
cases,
deaths
)
def _compile_nyt_county(self, data):
date = data.get('date')
county = data.get('county')
state = data.get('state')
fips = data.get('fips')
cases = data.get('cases')
deaths = data.get('deaths')
if date:
date = datetime.strptime(date, "%Y-%m-%d")
return NewYorkTimesCounty(
date,
county,
state,
fips,
cases,
deaths
)
def _compile_apple_stats(self, data):
name = data.get("subregion_and_city")
_type = data.get("get_type")
date = data.get("date")
driving = data.get("driving")
transit = data.get("transit")
walking = data.get("walking")
if date:
date = datetime.strptime(date, "%Y-%m-%d")
return Mobility(
name,
_type,
date,
driving,
transit,
walking
)
def _compile_vaccine(self, data):
return Vaccine(
data.get("candidate"),
data.get("sponsors"),
data.get("details"),
data.get("trialPhase"),
data.get("institutions"),
data.get("funding")
)
def _compile_vaccines(self, data):
source = data.get("source")
return Vaccines(
source,
[self._compile_vaccine(vacc) for vacc in data["data"]]
)
def _compile_vax_tl(self, data):
return [VaccineTimeline(datetime.strptime(date, '%m/%d/%y'), data[date]) for date in data]
def _compile_vax_country(self, data):
return VaccineCountry(data['country'], self._compile_vax_tl(data['timeline']))
######################################################################################
async def all(self, **kwargs) -> Global:
"""
Get the global stats for Coronavirus COVID-19
"""
yesterday = kwargs.get('yesterday', False)
two_days_ago = kwargs.get('two_days_ago', False)
allow_none = kwargs.get('allow_none', False)
endpoint = GLOBAL_DATA.format(self.api_url)
if yesterday:
self._check_yesterday(yesterday)
if two_days_ago:
self._check_two_days_ago(two_days_ago)
if yesterday and two_days_ago:
raise ValueError('yesterday and two_days_ago cannot both be True.')
if allow_none:
self._check_allow_none(allow_none)
yesterday = str(yesterday).lower()
two_days_ago = str(two_days_ago).lower()
allow_none = str(allow_none).lower()
params = {"yesterday": yesterday, "twoDaysAgo": two_days_ago, "allowNull": allow_none}
global_data = await self.request_client.make_request(endpoint, params)
cases = global_data.get("cases", 0)
deaths = global_data.get("deaths", 0)
recoveries = global_data.get("recovered", 0)
today = self._compile_today(global_data)
total_critical = global_data.get("critical", 0)
updated_epoch = global_data.get("updated", 0)
active = global_data.get("active", 0)
tests = global_data.get("tests", 0)
per_million = self._compile_permillion(global_data)
per_people = self._compile_perpeople(global_data)
population = global_data.get("population", 0)
affected_countries = global_data.get("affectedCountries")
updated = datetime.utcfromtimestamp(updated_epoch/1000.0)
return Global(
cases,
deaths,
recoveries,
today,
total_critical,
active,
tests,
per_million,
per_people,
population,
affected_countries,
updated,
)
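# Illustrative usage sketch (not part of the original client). Assuming an
# instance of this class named `client` inside a running event loop:
#
#     stats = await client.all(yesterday=True)
#     print(stats.cases, stats.deaths, stats.recoveries)
#
# The attribute names shown are an assumption; they follow the order of the
# arguments passed to Global(...) above and depend on the Global class itself.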
async def country(self, *countries, **kwargs) -> Union[Country, List[Country]]:
"""
Get the data for one or more specific countries (not necessarily all of them).
"""
yesterday = kwargs.get('yesterday', False)
two_days_ago = kwargs.get('two_days_ago', False)
allow_none = kwargs.get('allow_none', False)
country_list = ','.join(map(str, countries))
endpoint = COUNTRY_DATA.format(self.api_url, country_list)
if yesterday:
self._check_yesterday(yesterday)
if two_days_ago:
self._check_two_days_ago(two_days_ago)
if yesterday and two_days_ago:
raise ValueError('yesterday and two_days_ago cannot both be True.')
if allow_none:
self._check_allow_none(allow_none)
yesterday = str(yesterday).lower()
two_days_ago = str(two_days_ago).lower()
allow_none = str(allow_none).lower()
params = {"yesterday": yesterday, "twoDaysAgo": two_days_ago, "allowNull": allow_none}
data = await self.request_client.make_request(endpoint, params)
if isinstance(data, dict):
return self._compile_country_data(data)
return [self._compile_country_data(country) for country in data]
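# Illustrative sketch (not part of the original client): country() returns a
# single Country when one name is given and a list when several are given,
# because the underlying endpoint returns a dict or a list respectively.
#
#     uk = await client.country('UK')                # -> Country
#     pair = await client.country('UK', 'France')    # -> [Country, Country]
#
# `client` is an assumed instance name; country identifiers follow whatever
# the remote API accepts (names, ISO codes, etc.).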
async def all_countries(self, **kwargs) -> List[Country]:
"""
Get the data for every affected country.
"""
yesterday = kwargs.get('yesterday', False)
two_days_ago = kwargs.get('two_days_ago', False)
allow_none = kwargs.get('allow_none', False)
sort = kwargs.get('sort', None)
endpoint = ALL_COUNTRIES.format(self.api_url)
params = None
if yesterday:
self._check_yesterday(yesterday)
if two_days_ago:
self._check_two_days_ago(two_days_ago)
if yesterday and two_days_ago:
raise ValueError('yesterday and two_days_ago cannot both be True.')
if allow_none:
self._check_allow_none(allow_none)
yesterday = str(yesterday).lower()
two_days_ago = str(two_days_ago).lower()
allow_none = str(allow_none).lower()
if sort:
self._check_sort(sort)
params = {"yesterday": yesterday, "twoDaysAgo": two_days_ago, "allowNull": allow_none, "sort": sort}
else:
params = {"yesterday": yesterday, "twoDaysAgo": two_days_ago, "allowNull": allow_none}
all_countries = await self.request_client.make_request(endpoint, params)
return [self._compile_country_data(c) for c in all_countries]
async def all_states(self, **kwargs) -> List[State]:
"""
Get the stats for all US states
"""
yesterday = kwargs.get('yesterday', False)
allow_none = kwargs.get('allow_none', False)
sort = kwargs.get('sort', None)
endpoint = ALL_STATES.format(self.api_url)
params = None
if yesterday:
self._check_yesterday(yesterday)
if allow_none:
self._check_allow_none(allow_none)
yesterday = str(yesterday).lower()
allow_none = str(allow_none).lower()
if sort:
self._check_sort(sort)
params = {"yesterday": yesterday, "allowNull": allow_none, "sort": sort}
else:
params = {"yesterday": yesterday, "allowNull": allow_none}
state_info = await self.request_client.make_request(endpoint, params)
return [self._compile_state(state) for state in state_info]
async def state(self, *states, **kwargs) -> Union[State, List[State]]:
"""
Get the stats for US States
"""
yesterday = kwargs.get('yesterday', False)
allow_none = kwargs.get('allow_none', False)
state_list = ','.join(map(str, states))
endpoint = SINGLE_STATE.format(self.api_url, state_list)
if yesterday:
self._check_yesterday(yesterday)
if allow_none:
self._check_allow_none(allow_none)
yesterday = str(yesterday).lower()
allow_none = str(allow_none).lower()
params = {"yesterday": yesterday, "allowNull": allow_none}
data = await self.request_client.make_request(endpoint, params)
if isinstance(data, dict):
return self._compile_state(data)
return [self._compile_state(state) for state in data]
async def country_history(self, country='all', last_days='all') -> Historical:
"""
Get historical data for a specific country or globally.
Defaults to 'all' in order to get global data. This can be overridden by the client.
"""
endpoint = HISTORICAL_COUNTRY.format(self.api_url, country)
params = {"lastdays": last_days}
historical_stats = await self.request_client.make_request(endpoint, params)
return self._generate_history(historical_stats)
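# Illustrative sketch (assumed instance name `client`, not in the original):
#
#     world_hist = await client.country_history()            # global, full history
#     fr_hist = await client.country_history('France', 30)   # last 30 days
#
# `last_days` is forwarded verbatim as the `lastdays` query parameter, so it
# may be an integer or the string 'all'.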
async def province_history(self, country, province, last_days='all') -> Historical:
"""
Get the historical data for a province within a country.
"""
endpoint = HISTORICAL_PROVINCE.format(self.api_url, country, province)
params = {"lastdays": last_days}
data = await self.request_client.make_request(endpoint, params)
'',
'next': '',
'page': 1,
})
self.assertAPINotes(resp_json, self.normal.note_set.order_by(Note.id))
# do a filter following a join
resp = self.app.get('/api/note/?user__username=admin&ordering=id')
resp_json = self.response_json(resp)
self.assertAPIMeta(resp_json, {
'model': 'note',
'previous': '',
'next': '',
'page': 1,
})
self.assertAPINotes(resp_json, self.admin.note_set.order_by(Note.id))
# filter multiple fields
notes = list(self.admin.note_set.order_by(Note.id))
third_id = notes[3].id
resp = self.app.get('/api/note/?user__username=admin&id__lt=%s&ordering=id' % third_id)
resp_json = self.response_json(resp)
self.assertAPINotes(resp_json, notes[:3])
# do a filter using multiple values
resp = self.app.get('/api/note/?user__username=admin&user__username=inactive&ordering=id')
resp_json = self.response_json(resp)
self.assertAPIMeta(resp_json, {
'model': 'note',
'previous': '',
'next': '',
'page': 1,
})
self.assertAPINotes(resp_json, Note.filter(user__in=[self.admin, self.inactive]).order_by(Note.id))
# do a filter with a negation
resp = self.app.get('/api/note/?-user__username=admin&ordering=id')
resp_json = self.response_json(resp)
self.assertAPINotes(resp_json, Note.filter(user__in=[
self.normal, self.inactive]).order_by(Note.id))
# do a filter with an IN operator and multiple IDs
# https://github.com/coleifer/flask-peewee/issues/112
resp = self.app.get('/api/note/?id__in=1,2,5')
resp_json = self.response_json(resp)
self.assertAPINotes(resp_json, Note.filter(id__in=[1,2,5]).order_by(Note.id))
# also test that the IN operator works with list of strings
resp = self.app.get('/api/user/?username__in=admin,normal')
resp_json = self.response_json(resp)
self.assertAPIUsers(resp_json, User.filter(username__in=['admin', 'normal']).order_by(User.id))
def test_filter_with_pagination(self):
users, notes = self.get_users_and_notes()
notes = list(self.admin.note_set.order_by(Note.id))
# do a simple filter on a related model
resp = self.app.get('/api/note/?user__username=admin&limit=4&ordering=id')
resp_json = self.response_json(resp)
self.assertAPINotes(resp_json, notes[:4])
next_url = resp_json['meta']['next']
resp = self.app.get(next_url)
resp_json = self.response_json(resp)
self.assertAPINotes(resp_json, notes[4:8])
next_url = resp_json['meta']['next']
resp = self.app.get(next_url)
resp_json = self.response_json(resp)
self.assertEqual(resp_json['meta']['next'], '')
self.assertAPINotes(resp_json, notes[8:])
prev_url = resp_json['meta']['previous']
resp = self.app.get(prev_url)
resp_json = self.response_json(resp)
self.assertAPINotes(resp_json, notes[4:8])
prev_url = resp_json['meta']['previous']
resp = self.app.get(prev_url)
resp_json = self.response_json(resp)
self.assertEqual(resp_json['meta']['previous'], '')
self.assertAPINotes(resp_json, notes[:4])
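# Summary added for readability (the exact behaviour is defined by the tests
# above): the API's query-string filtering contract exercised here includes
#   /api/note/?user__username=admin          filter across a join
#   /api/note/?id__lt=5                      field lookup (less-than)
#   /api/note/?-user__username=admin         negated filter
#   /api/note/?id__in=1,2,5                  IN with comma-separated values
#   /api/note/?limit=4&ordering=id           page size and ordering; the
#                                            response meta carries next/previous URLs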
class RestApiUserAuthTestCase(RestApiTestCase):
def setUp(self):
super(RestApiUserAuthTestCase, self).setUp()
self.create_users()
def create_notes(self):
notes = [
Note.create(user=self.admin, message='admin'),
Note.create(user=self.normal, message='normal'),
]
self.admin_note, self.normal_note = notes
return notes
def test_list_get(self):
resp = self.app.get('/api/note/')
resp_json = self.response_json(resp)
self.assertAPIResponse(resp_json, [])
self.assertAPIMeta(resp_json, {'model': 'note', 'next': '', 'page': 1, 'previous': ''})
self.create_notes()
resp = self.app.get('/api/note/?ordering=id')
resp_json = self.response_json(resp)
self.assertAPINotes(resp_json, [
self.admin_note,
self.normal_note,
])
def test_detail_get(self):
resp = self.app.get('/api/note/1/')
self.assertEqual(resp.status_code, 404)
self.create_notes()
resp = self.app.get('/api/note/%s/' % self.normal_note.id)
resp_json = self.response_json(resp)
self.assertAPINote(resp_json, self.normal_note)
def test_auth_create(self):
note_data = {'message': 'test', 'user': self.inactive.id}
serialized = json.dumps(note_data)
# this request is not authorized
resp = self.app.post('/api/note/', data=serialized)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.post('/api/note/', data=serialized, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database
resp = self.app.post('/api/note/', data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
def test_create(self):
note_data = {'message': 'test', 'user': self.inactive.id}
serialized = json.dumps(note_data)
# authorized as a normal (non-admin) user
resp = self.app.post('/api/note/', data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
new_note = Note.get(message='test')
self.assertEqual(new_note.user, self.inactive)
resp_json = self.response_json(resp)
self.assertAPINote(resp_json, new_note)
def test_auth_edit(self):
self.create_notes()
note_data = {'message': 'edited'}
serialized = json.dumps(note_data)
url = '/api/note/%s/' % self.admin_note.id
# this request is not authorized
resp = self.app.put(url, data=serialized)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.put(url, data=serialized, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database
resp = self.app.put(url, data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
def test_edit(self):
self.create_notes()
note_data = {'message': 'edited'}
serialized = json.dumps(note_data)
url = '/api/note/%s/' % self.admin_note.id
# authorized as a normal (non-admin) user
resp = self.app.put(url, data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
note = Note.get(id=self.admin_note.id)
self.assertEqual(note.message, 'edited')
resp_json = self.response_json(resp)
self.assertAPINote(resp_json, note)
def test_auth_delete(self):
self.create_notes()
url = '/api/note/%s/' % self.admin_note.id
# this request is not authorized
resp = self.app.delete(url)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.delete(url, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database
resp = self.app.delete(url, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
def test_delete(self):
self.create_notes()
url = '/api/note/%s/' % self.admin_note.id
# authorized as a normal (non-admin) user
resp = self.app.delete(url, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(Note.select().count(), 1)
resp_json = self.response_json(resp)
self.assertEqual(resp_json, {'deleted': 1})
class RestApiOwnerAuthTestCase(RestApiTestCase):
def setUp(self):
super(RestApiOwnerAuthTestCase, self).setUp()
self.create_users()
def create_messages(self):
messages = [
Message.create(user=self.admin, content='admin'),
Message.create(user=self.normal, content='normal'),
]
self.admin_message, self.normal_message = messages
return messages
def test_list_get(self):
resp = self.app.get('/api/message/')
resp_json = self.response_json(resp)
self.assertAPIResponse(resp_json, [])
self.assertAPIMeta(resp_json, {'model': 'message', 'next': '', 'page': 1, 'previous': ''})
self.create_messages()
resp = self.app.get('/api/message/?ordering=id')
resp_json = self.response_json(resp)
self.assertAPIMessages(resp_json, [
self.admin_message,
self.normal_message,
])
def test_detail_get(self):
resp = self.app.get('/api/message/1/')
self.assertEqual(resp.status_code, 404)
self.create_messages()
resp = self.app.get('/api/message/%s/' % self.normal_message.id)
resp_json = self.response_json(resp)
self.assertAPIMessage(resp_json, self.normal_message)
def test_auth_create(self):
message_data = {'content': 'test'}
serialized = json.dumps(message_data)
# this request is not authorized
resp = self.app.post('/api/message/', data=serialized)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.post('/api/message/', data=serialized, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database
resp = self.app.post('/api/message/', data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
def test_create(self):
message_data = {'content': 'test'}
serialized = json.dumps(message_data)
# authorized as a normal (non-admin) user
resp = self.app.post('/api/message/', data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
new_message = Message.get(content='test')
self.assertEqual(new_message.user, self.normal)
resp_json = self.response_json(resp)
self.assertAPIMessage(resp_json, new_message)
def test_auth_edit(self):
self.create_messages()
message_data = {'content': 'edited'}
serialized = json.dumps(message_data)
url = '/api/message/%s/' % self.normal_message.id
# this request is not authorized
resp = self.app.put(url, data=serialized)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.put(url, data=serialized, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database, but not owner
resp = self.app.put(url, data=serialized, headers=self.auth_headers('admin', 'admin'))
self.assertEqual(resp.status_code, 403)
# authorized, user in database, is owner
resp = self.app.put(url, data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
obj = Message.get(id=self.normal_message.id)
self.assertEqual(obj.content, 'edited')
def test_edit(self):
self.create_messages()
message_data = {'content': 'edited'}
serialized = json.dumps(message_data)
url = '/api/message/%s/' % self.normal_message.id
# authorized as normal
resp = self.app.put(url, data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
message = Message.get(id=self.normal_message.id)
self.assertEqual(message.content, 'edited')
resp_json = self.response_json(resp)
self.assertAPIMessage(resp_json, message)
def test_auth_delete(self):
self.create_messages()
url = '/api/message/%s/' % self.normal_message.id
# this request is not authorized
resp = self.app.delete(url)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.delete(url, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database, not owner
resp = self.app.delete(url, headers=self.auth_headers('admin', 'admin'))
self.assertEqual(resp.status_code, 403)
# authorized, user in database, is owner
resp = self.app.delete(url, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
def test_delete(self):
self.create_messages()
url = '/api/message/%s/' % self.normal_message.id
# authorized as a normal (non-admin) user
resp = self.app.delete(url, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(Message.select().count(), 1)
resp_json = self.response_json(resp)
self.assertEqual(resp_json, {'deleted': 1})
class RestApiAdminAuthTestCase(RestApiTestCase):
def test_list_get(self):
resp = self.app.get('/api/user/')
resp_json = self.response_json(resp)
self.assertAPIResponse(resp_json, [])
self.assertAPIMeta(resp_json, {'model': 'user', 'next': '', 'page': 1, 'previous': ''})
self.create_users()
resp = self.app.get('/api/user/?ordering=id')
resp_json = self.response_json(resp)
self.assertAPIUsers(resp_json, [
self.admin,
self.normal,
])
resp = self.app.get('/api/user/?admin=True')
self.assertAPIUsers(self.response_json(resp), [self.admin])
resp = self.app.get('/api/user/?admin=False')
self.assertAPIUsers(self.response_json(resp), [self.normal])
def test_detail_get(self):
resp = self.app.get('/api/user/1/')
self.assertEqual(resp.status_code, 404)
self.create_users()
resp = self.app.get('/api/user/%s/' % self.normal.id)
resp_json = self.response_json(resp)
self.assertAPIUser(resp_json, self.normal)
resp = self.app.get('/api/user/%s/' % self.inactive.id)
self.assertEqual(resp.status_code, 404)
def test_auth_create(self):
self.create_users()
new_pass = make_password('<PASSWORD>')
user_data = {'username': 'test', 'password': new_pass, 'email': ''}
serialized = json.dumps(user_data)
# this request is not authorized
resp = self.app.post('/api/user/', data=serialized)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.post('/api/user/', data=serialized, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database, but not an administrator
resp = self.app.post('/api/user/', data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 401)
# authorized as an admin
resp = self.app.post('/api/user/', data=serialized, headers=self.auth_headers('admin', 'admin'))
self.assertEqual(resp.status_code, 200)
def test_create(self):
self.create_users()
new_pass = make_password('<PASSWORD>')
user_data = {'username': 'test', 'password': new_pass, 'email': ''}
serialized = json.dumps(user_data)
# authorized as an admin
resp = self.app.post('/api/user/', data=serialized, headers=self.auth_headers('admin', 'admin'))
self.assertEqual(resp.status_code, 200)
new_user = User.get(username='test')
self.assertTrue(check_password('test', new_user.password))
resp_json = self.response_json(resp)
self.assertAPIUser(resp_json, new_user)
def test_auth_edit(self):
self.create_users()
user_data = {'username': 'edited'}
serialized = json.dumps(user_data)
url = '/api/user/%s/' % self.normal.id
# this request is not authorized
resp = self.app.put(url, data=serialized)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.put(url, data=serialized, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database, but not an administrator
resp = self.app.put(url, data=serialized, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 401)
# authorized as an admin
resp = self.app.put(url, data=serialized, headers=self.auth_headers('admin', 'admin'))
self.assertEqual(resp.status_code, 200)
def test_edit(self):
self.create_users()
user_data = {'username': 'edited'}
serialized = json.dumps(user_data)
url = '/api/user/%s/' % self.normal.id
# authorized as an admin
resp = self.app.put(url, data=serialized, headers=self.auth_headers('admin', 'admin'))
self.assertEqual(resp.status_code, 200)
user = User.get(id=self.normal.id)
self.assertEqual(user.username, 'edited')
resp_json = self.response_json(resp)
self.assertAPIUser(resp_json, user)
def test_auth_delete(self):
self.create_users()
url = '/api/user/%s/' % self.normal.id
# this request is not authorized
resp = self.app.delete(url)
self.assertEqual(resp.status_code, 401)
# authorized, but user does not exist in database
resp = self.app.delete(url, headers=self.auth_headers('xxx', 'xxx'))
self.assertEqual(resp.status_code, 401)
# authorized, user in database, but not an administrator
resp = self.app.delete(url, headers=self.auth_headers('normal', 'normal'))
self.assertEqual(resp.status_code, 401)
# authorized as an admin
resp = self.app.delete(url, headers=self.auth_headers('admin', 'admin'))
self.assertEqual(resp.status_code, 200)
def test_delete(self):
self.create_users()
url = '/api/user/%s/' % self.normal.id
# authorized as an admin
resp = self.app.delete(url, headers=self.auth_headers('admin', 'admin'))
# -*- coding: utf-8 -*-
"""
Parsing of grammar files
"""
from typing import Tuple, List, Iterable, FrozenSet, Any
from pyramids.categorization import Category, Property, LinkLabel
from pyramids.rules.conjunction import ConjunctionRule
from pyramids.rules.last_term_match import LastTermMatchRule
from pyramids.rules.one_term_match import OneTermMatchRule
from pyramids.rules.all_terms_match import AllTermsMatchRule
from pyramids.rules.any_term_match import AnyTermMatchRule
from pyramids.rules.head_match import HeadMatchRule
from pyramids.rules.compound_match import CompoundMatchRule
from pyramids.rules.sequence import SequenceRule
from pyramids.rules.subtree_match import SubtreeMatchRule
from pyramids.rules.suffix import SuffixRule
from pyramids.rules.token_set import SetRule
from pyramids.rules.property_inheritance import PropertyInheritanceRule
__all__ = [
'GrammarSyntaxError',
'GrammarParserError',
'GrammarParser',
]
class GrammarParserError(Exception):
"""An error while parsing a grammar file"""
def __init__(self, msg: str = None, filename: str = None, lineno: int = 1, offset: int = 1,
text: str = None):
super().__init__(msg, (filename, lineno, offset, text))
self.msg = msg
self.args = (msg, (filename, lineno, offset, text))
self.filename = filename
self.lineno = lineno
self.offset = offset
self.text = text
def __repr__(self) -> str:
return type(self).__name__ + repr((self.msg,
(self.filename, self.lineno, self.offset, self.text)))
def set_info(self, filename: str = None, lineno: int = None, offset: int = None,
text: str = None) -> None:
"""Set additional information on the exception after it has been raised."""
if filename is not None:
self.filename = filename
if lineno is not None:
self.lineno = lineno
if offset is not None:
self.offset = offset
if text is not None:
self.text = text
self.args = (self.msg, (self.filename, self.lineno, self.offset, self.text))
class GrammarSyntaxError(GrammarParserError, SyntaxError):
"""A syntax error detected in a grammar file"""
def __init__(self, msg: str, filename: str = None, lineno: int = 1, offset: int = 1,
text: str = None):
super().__init__(msg, filename, lineno, offset, text)  # pass fields individually so GrammarParserError records them correctly
def __repr__(self) -> str:
return super(GrammarParserError, self).__repr__()
class GrammarParser:
"""Parsing of grammar files"""
@staticmethod
def parse_category(definition: str, offset: int = 1) -> Category:
"""Parse a category string, in the syntax used by grammar files."""
definition = definition.strip()
if '(' in definition:
if not definition.endswith(')'):
raise GrammarSyntaxError("Expected: ')' in category definition",
offset=offset + len(definition))
if definition.count('(') > 1:
raise GrammarSyntaxError("Unexpected: '(' in category definition",
offset=offset + definition.find("(",
definition.find("(") + 1))
if definition.count(')') > 1:
raise GrammarSyntaxError("Unexpected: ')' in category definition",
offset=offset + definition.find(")",
definition.find(")") + 1))
name, properties = definition[:-1].split('(')
if ',' in name:
raise GrammarSyntaxError("Unexpected: ',' in category definition",
offset=offset + definition.find(","))
if len(name.split()) > 1:
raise GrammarSyntaxError("Unexpected: white space in category definition",
offset=offset + len(name) + 1)
properties = [prop.strip() for prop in properties.split(',')]
for prop in properties:
if not prop.strip():
if ",," in definition:
raise GrammarSyntaxError("Unexpected: ','",
offset=offset + definition.find(",,") + 1)
elif "(," in definition:
raise GrammarSyntaxError("Unexpected: ','",
offset=offset + definition.find("(,") + 1)
elif ",)" in definition:
raise GrammarSyntaxError("Unexpected: ')'",
offset=offset + definition.find(",)") + 1)
else:
raise GrammarSyntaxError("Unexpected: ')'",
offset=offset + definition.find("()") + 1)
positive = [prop for prop in properties if not prop.startswith('-')]
negative = [prop[1:] for prop in properties if prop.startswith('-')]
for prop in negative:
if prop.startswith('-'):
raise GrammarSyntaxError("Unexpected: '-'",
offset=offset + definition.find('-' + prop))
if prop in positive:
raise GrammarSyntaxError("Unexpected: prop is both positive and negative",
offset=offset + definition.find(prop))
return Category(name, [Property.get(n) for n in positive],
[Property.get(n) for n in negative])
else:
if ')' in definition:
raise GrammarSyntaxError("Unexpected: ')' in category definition",
offset=offset + definition.find(")"))
if ',' in definition:
raise GrammarSyntaxError("Unexpected: ',' in category definition",
offset=offset + definition.find(","))
if len(definition.split()) > 1:
raise GrammarSyntaxError("Unexpected: white space in category definition",
offset=offset + len(definition.split()[0]) + 1)
if not definition:
raise GrammarSyntaxError("Expected: category definition", offset=offset)
return Category(definition)
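# Illustrative examples (not in the original source) of the category syntax
# accepted by parse_category:
#
#     parse_category("noun")                  -> Category('noun')
#     parse_category("noun(plural,-proper)")  -> Category('noun',
#                                                         positive=[plural],
#                                                         negative=[proper])
#
# The property names 'plural' and 'proper' are invented for illustration; a
# leading '-' marks a property as negative.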
def parse_branch_rule_term(self, term: str, offset: int = 1) -> Tuple[bool, List[Category]]:
is_head = False
if term.startswith('*'):
term = term[1:]
offset += 1
is_head = True
if '*' in term:
raise GrammarSyntaxError("Unexpected: '*'", offset=offset + term.find('*'))
subcategories = []
subcategory_definitions = term.split('|')
for definition in subcategory_definitions:
subcategory = self.parse_category(definition, offset=offset)
subcategories.append(subcategory)
offset += len(definition) + 1
if not subcategories:
raise GrammarSyntaxError("Expected: category", offset=offset)
return is_head, subcategories
@staticmethod
def parse_branch_rule_link_type(term: str, offset: int = 1) -> Tuple[LinkLabel, bool, bool]:
if '<' in term[1:]:
raise GrammarSyntaxError("Unexpected: '<'",
offset=offset + term.find('<', term.find('<') + 1))
if '>' in term[:-1]:
raise GrammarSyntaxError("Unexpected: '<'", offset=offset + term.find('>'))
left = term.startswith('<')
right = term.endswith('>')
if left:
term = term[1:]
if right:
term = term[:-1]
if not term:
raise GrammarSyntaxError("Expected: link type", offset=offset + left)
return LinkLabel.get(term), left, right
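# Illustrative sketch (not in the original source): a link-type term may be
# prefixed with '<' and/or suffixed with '>' to mark its direction, e.g.
#
#     parse_branch_rule_link_type('<subj')  -> (LinkLabel 'subj', left=True,  right=False)
#     parse_branch_rule_link_type('obj>')   -> (LinkLabel 'obj',  left=False, right=True)
#
# The label names 'subj'/'obj' are invented here purely for illustration.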
def parse_branch_rule(self, category: Category, definition: str,
offset: int = 1) -> SequenceRule:
subcategory_sets = []
link_types = []
term = ''
term_start = 0
head_index = None
for index in range(len(definition)):
char = definition[index]
if char.isspace():
if not term:
continue
if '>' in term or '<' in term:
if not subcategory_sets:
raise GrammarSyntaxError("Unexpected: link type",
offset=offset + term_start)
link_type, left, right = self.parse_branch_rule_link_type(term,
offset + term_start)
if head_index is None:
if right:
raise GrammarSyntaxError("Unexpected: right link",
offset=offset + term_start)
else:
if left:
raise GrammarSyntaxError("Unexpected: left link",
offset=offset + term_start)
link_types[-1].add((link_type, left, right))
else:
is_head, subcategories = self.parse_branch_rule_term(term,
offset=offset + term_start)
if is_head:
if head_index is not None:
raise GrammarSyntaxError("Unexpected: '*'",
offset=(offset + term_start + term.find('*')))
head_index = len(subcategory_sets)
subcategory_sets.append(subcategories)
link_types.append(set())
term = ''
else:
if not term:
term_start = index
term += char
if term:
is_head, subcategories = self.parse_branch_rule_term(term, offset=offset + term_start)
if is_head:
if head_index is not None:
raise GrammarSyntaxError("Unexpected: '*'",
offset=offset + term_start + term.find('*'))
head_index = len(subcategory_sets)
subcategory_sets.append(subcategories)
link_types.append(set())
if not subcategory_sets:
raise GrammarSyntaxError("Expected: category", offset=offset)
if link_types[-1]:
raise GrammarSyntaxError("Expected: category", offset=offset + term_start + len(term))
link_types = link_types[:-1]
if head_index is None:
if len(subcategory_sets) != 1:
raise GrammarSyntaxError("Expected: '*'", offset=offset + term_start)
head_index = 0
return SequenceRule(category, subcategory_sets, head_index, link_types)
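# Illustrative sketch (not in the original source) of the sequence syntax this
# method parses: whitespace-separated terms, '*' marking the head term, '|'
# separating alternative categories within a term, and terms containing '<' or
# '>' carrying link types, e.g.
#
#     parse_branch_rule(parse_category('clause'), 'noun|pronoun <subj *verb')
#
# would build a SequenceRule whose head is the 'verb' term, with the 'subj'
# link label recorded (as a leftward link) on the term preceding the head.
# All category and label names here are invented for illustration.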
def parse_grammar_definition_file(self, lines: Iterable[str],
filename: str = None) -> List[SequenceRule]:
branch_rules = []
category = None
sequence_found = False
line_number = 0
for raw_line in lines:
line_number += 1
try:
line = raw_line.split('#')[0].rstrip()
if not line:
continue
if line[:1].isspace():
if ':' in line:
raise GrammarSyntaxError("Unexpected: ':'", offset=1 + line.find(':'))
if not category:
raise GrammarSyntaxError("Expected: category header",
offset=1 + line.find(line.strip()))
branch_rules.append(
self.parse_branch_rule(category, line.lstrip(),
offset=1 + line.find(line.lstrip())))
sequence_found = True
else:
if category is not None and not sequence_found:
raise GrammarSyntaxError("Expected: category sequence", offset=1)
if ':' not in line:
raise GrammarSyntaxError("Expected: ':'", offset=1 + len(line))
if line.count(':') > 1:
raise GrammarSyntaxError("Unexpected: ':'",
offset=1 + line.find(':', line.find(':') + 1))
header, sequence = line.split(':')
category = self.parse_category(header)
if sequence.strip():
branch_rules.append(
self.parse_branch_rule(category, sequence.lstrip(),
offset=1 + sequence.find(sequence.lstrip()))
)
sequence_found = True
else:
sequence_found = False
except GrammarParserError as error:
error.set_info(filename=filename, lineno=line_number, text=raw_line)
raise error
except Exception as original_exception:
raise GrammarParserError(filename=filename,
lineno=line_number, text=raw_line) from original_exception
return branch_rules
def parse_match_rule(self, definition: str, offset: int = 1) -> Tuple[SubtreeMatchRule, ...]:
if not definition.startswith('['):
raise GrammarSyntaxError("Expected: '['", offset=offset)
if not definition.endswith(']'):
raise GrammarSyntaxError("Expected: ']'", offset=offset + len(definition) - 1)
generator_map = {
'any_term': AnyTermMatchRule,
'all_terms': AllTermsMatchRule,
'compound': CompoundMatchRule,
'head': HeadMatchRule,
'one_term': OneTermMatchRule,
'last_term': LastTermMatchRule,
}
rule_list = []
for category_definition in definition[1:-1].split():
category = self.parse_category(category_definition,
offset=1 + definition.find(category_definition))
generator = generator_map.get(str(category.name), None)
if generator is None:
raise GrammarSyntaxError("Unexpected: " + repr(category),
offset=1 + definition.find(category_definition))
assert callable(generator)
rule_list.append(generator(category.positive_properties, category.negative_properties))
if not rule_list:
raise GrammarSyntaxError("Expected: category")
return tuple(rule_list)
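# Illustrative sketch (not in the original source): a match-rule definition is
# a bracketed, whitespace-separated list of categories whose names select the
# rule class and whose properties parameterise it, e.g.
#
#     parse_match_rule('[head(plural) any_term(-proper)]')
#
# would yield (HeadMatchRule(...), AnyTermMatchRule(...)). The property names
# are invented here for illustration.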
def parse_conjunction_rule(self,
category: Category,
match_rules: List[Tuple[SubtreeMatchRule, ...]],
property_rules: List[Tuple[FrozenSet[Tuple[Any, bool]],
Tuple[SubtreeMatchRule, ...]]],
definition: str,
offset: int = 1) -> ConjunctionRule:
single = False
compound = False
while definition[:1] in ('+', '-'):
if definition[0] == '+':
if compound:
raise GrammarSyntaxError("Unexpected: '+'", offset=offset)
compound = True
else:
if single:
raise GrammarSyntaxError("Unexpected: '-'", offset=offset)
single = True
definition = definition[1:]
offset += 1
subcategory_sets = []
link_types = []
term = ''
term_start = 0
head_index = None
for index in range(len(definition)):
char = definition[index]
if char.isspace():
if not term:
continue
if '>' in term or '<' in term:
if not subcategory_sets:
raise GrammarSyntaxError("Unexpected: link type",
offset=offset + term_start)
link_type, left, right = self.parse_branch_rule_link_type(term,
offset + term_start)
if head_index is None:
if right:
raise GrammarSyntaxError("Unexpected: right link",
offset=offset + term_start)
else:
if left:
raise GrammarSyntaxError("Unexpected: left link",
offset=offset + term_start)
link_types[-1].add((link_type, left, right))
else:
if len(subcategory_sets) >= 3:
raise GrammarSyntaxError("Unexpected: category",
offset=offset + term_start)
is_head, subcategories = self.parse_branch_rule_term(term,
offset=offset + term_start)
if is_head:
if head_index is not None:
raise GrammarSyntaxError("Unexpected: '*'",
offset=offset + term_start + term.find('*'))
head_index = len(subcategory_sets)
subcategory_sets.append(subcategories)
link_types.append(set())
term = ''
else:
if not term:
term_start = index
# Copyright (c) 2013-2015 by <NAME> <<EMAIL>>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# <NAME> - initial implementation, API, and documentation
"""Utilities for encoding and decoding ASN.1 DER data
The der_encode function takes a Python value and encodes it in DER
format, returning a byte string. In addition to supporting standard
Python types, BitString can be used to encode a DER bit string,
ObjectIdentifier can be used to encode OIDs, values can be wrapped
in a TaggedDERObject to set an alternate DER tag on them, and
non-standard types can be encoded by placing them in a RawDERObject.
The der_decode function takes a byte string in DER format and decodes
it into the corresponding Python values.
"""
# pylint: disable=bad-whitespace
# ASN.1 object classes
UNIVERSAL = 0x00
APPLICATION = 0x01
CONTEXT_SPECIFIC = 0x02
PRIVATE = 0x03
# ASN.1 universal object tags
END_OF_CONTENT = 0x00
BOOLEAN = 0x01
INTEGER = 0x02
BIT_STRING = 0x03
OCTET_STRING = 0x04
NULL = 0x05
OBJECT_IDENTIFIER = 0x06
UTF8_STRING = 0x0c
SEQUENCE = 0x10
SET = 0x11
IA5_STRING = 0x16
# pylint: enable=bad-whitespace
_asn1_class = ('Universal', 'Application', 'Context-specific', 'Private')
_der_class_by_tag = {}
_der_class_by_type = {}
def _encode_identifier(asn1_class, constructed, tag):
"""Encode a DER object's identifier"""
if asn1_class not in (UNIVERSAL, APPLICATION, CONTEXT_SPECIFIC, PRIVATE):
raise ASN1EncodeError('Invalid ASN.1 class')
flags = (asn1_class << 6) | (0x20 if constructed else 0x00)
if tag < 0x20:
identifier = [flags | tag]
else:
identifier = [tag & 0x7f]
while tag >= 0x80:
tag >>= 7
identifier.append(0x80 | (tag & 0x7f))
identifier.append(flags | 0x1f)
return bytes(identifier[::-1])
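# Worked examples (added for clarity, not in the original): with the constants
# above, a primitive UNIVERSAL INTEGER and a constructed UNIVERSAL SEQUENCE
# encode to the familiar single-octet identifiers:
#
#     _encode_identifier(UNIVERSAL, False, INTEGER)  == b'\x02'
#     _encode_identifier(UNIVERSAL, True, SEQUENCE)  == b'\x30'
#
# Tags of 0x1f and above use the high-tag-number form: flags | 0x1f in the
# first octet, followed by the base-128 digits of the tag.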
class ASN1Error(ValueError):
"""ASN.1 coding error"""
class ASN1EncodeError(ASN1Error):
"""ASN.1 DER encoding error"""
class ASN1DecodeError(ASN1Error):
"""ASN.1 DER decoding error"""
class DERTag:
"""A decorator used by classes which convert values to/from DER
Classes which convert Python values to and from DER format
should use the DERTag decorator to indicate what DER tag value
they understand. When DER data is decoded, the tag is looked
up in the list to see which class to call to perform the
decoding.
Classes which convert existing Python types to and from DER
format can specify the list of types they understand in the
optional "types" argument. Otherwise, conversion is expected
to be to and from the new class being defined.
"""
def __init__(self, tag, types=(), constructed=False):
self._tag = tag
self._types = types
self._identifier = _encode_identifier(UNIVERSAL, constructed, tag)
def __call__(self, cls):
cls.identifier = self._identifier
_der_class_by_tag[self._tag] = cls
if self._types:
for t in self._types:
_der_class_by_type[t] = cls
else:
_der_class_by_type[cls] = cls
return cls
class RawDERObject:
"""A class which can encode a DER object of an arbitrary type
This object is initialized with an ASN.1 class, tag, and a
byte string representing the already encoded data. Such
objects will never have the constructed flag set, since
that is represented here as a TaggedDERObject.
"""
def __init__(self, tag, content, asn1_class):
self.asn1_class = asn1_class
self.tag = tag
self.content = content
def __repr__(self):
return ('RawDERObject(%s, %s, %r)' %
(_asn1_class[self.asn1_class], self.tag, self.content))
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.asn1_class == other.asn1_class and
self.tag == other.tag and self.content == other.content)
def __hash__(self):
return hash((self.asn1_class, self.tag, self.content))
def encode_identifier(self):
"""Encode the DER identifier for this object as a byte string"""
return _encode_identifier(self.asn1_class, False, self.tag)
def encode(self):
"""Encode the content for this object as a DER byte string"""
return self.content
class TaggedDERObject:
"""An explicitly tagged DER object
This object provides a way to wrap an ASN.1 object with an
explicit tag. The value (including the tag representing its
actual type) is then encoded as part of its value. By
default, the ASN.1 class for these objects is CONTEXT_SPECIFIC,
and the DER encoding always marks these values as constructed.
"""
def __init__(self, tag, value, asn1_class=CONTEXT_SPECIFIC):
self.asn1_class = asn1_class
self.tag = tag
self.value = value
def __repr__(self):
if self.asn1_class == CONTEXT_SPECIFIC:
return 'TaggedDERObject(%s, %r)' % (self.tag, self.value)
else:
return ('TaggedDERObject(%s, %s, %r)' %
(_asn1_class[self.asn1_class], self.tag, self.value))
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.asn1_class == other.asn1_class and
self.tag == other.tag and self.value == other.value)
def __hash__(self):
return hash((self.asn1_class, self.tag, self.value))
def encode_identifier(self):
"""Encode the DER identifier for this object as a byte string"""
return _encode_identifier(self.asn1_class, True, self.tag)
def encode(self):
"""Encode the content for this object as a DER byte string"""
return der_encode(self.value)
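# Illustrative sketch (not in the original source): wrapping a value in
# TaggedDERObject adds an outer, constructed, context-specific tag around the
# value's normal DER encoding, e.g. for tag 0 around the integer 1:
#
#     der_encode(TaggedDERObject(0, 1))
#     # -> b'\xa0\x03\x02\x01\x01'
#     #    [0] EXPLICIT { INTEGER 1 }
#
# (0xa0 = context-specific | constructed | tag 0; der_encode is assumed to be
# defined elsewhere in this module.)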
@DERTag(NULL, (type(None),))
class _Null:
"""A null value"""
@staticmethod
def encode(value):
"""Encode a DER null value"""
# pylint: disable=unused-argument
return b''
@classmethod
def decode(cls, constructed, content):
"""Decode a DER null value"""
if constructed:
raise ASN1DecodeError('NULL should not be constructed')
if content:
raise ASN1DecodeError('NULL should not have associated content')
return None
@DERTag(BOOLEAN, (bool,))
class _Boolean:
"""A boolean value"""
@staticmethod
def encode(value):
"""Encode a DER boolean value"""
return b'\xff' if value else b'\0'
@classmethod
def decode(cls, constructed, content):
"""Decode a DER boolean value"""
if constructed:
raise ASN1DecodeError('BOOLEAN should not be constructed')
if content not in {b'\x00', b'\xff'}:
raise ASN1DecodeError('BOOLEAN content must be 0x00 or 0xff')
return bool(content[0])
@DERTag(INTEGER, (int,))
class _Integer:
"""An integer value"""
@staticmethod
def encode(value):
"""Encode a DER integer value"""
l = value.bit_length()
l = l // 8 + 1 if l % 8 == 0 else (l + 7) // 8
result = value.to_bytes(l, 'big', signed=True)
return result[1:] if result.startswith(b'\xff\x80') else result
@classmethod
def decode(cls, constructed, content):
"""Decode a DER integer value"""
if constructed:
raise ASN1DecodeError('INTEGER should not be constructed')
return int.from_bytes(content, 'big', signed=True)
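# Worked examples (added for clarity, not in the original): the two's-complement
# content octets produced by _Integer.encode, before der_encode prepends the
# identifier and length:
#
#     _Integer.encode(127)   == b'\x7f'
#     _Integer.encode(128)   == b'\x00\x80'   # leading 0x00 keeps it positive
#     _Integer.encode(-128)  == b'\x80'       # redundant 0xff byte stripped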
@DERTag(OCTET_STRING, (bytes, bytearray))
class _OctetString:
"""An octet string value"""
@staticmethod
def encode(value):
"""Encode a DER octet string"""
return value
@classmethod
def decode(cls, constructed, content):
"""Decode a DER octet string"""
if constructed:
raise ASN1DecodeError('OCTET STRING should not be constructed')
return content
@DERTag(UTF8_STRING, (str,))
class _UTF8String:
"""A UTF-8 string value"""
@staticmethod
def encode(value):
"""Encode a DER UTF-8 string"""
return value.encode('utf-8')
@classmethod
def decode(cls, constructed, content):
"""Decode a DER UTF-8 string"""
if constructed:
raise ASN1DecodeError('UTF8 STRING should not be constructed')
return content.decode('utf-8')
@DERTag(SEQUENCE, (list, tuple), constructed=True)
class _Sequence:
"""A sequence of values"""
@staticmethod
def encode(value):
"""Encode a sequence of DER values"""
return b''.join(der_encode(item) for item in value)
@classmethod
def decode(cls, constructed, content):
"""Decode a sequence of DER values"""
if not constructed:
raise ASN1DecodeError('SEQUENCE should always be constructed')
offset = 0
length = len(content)
value = []
while offset < length:
# pylint: disable=unpacking-non-sequence
item, consumed = der_decode(content[offset:], partial_ok=True)
# pylint: enable=unpacking-non-sequence
value.append(item)
offset += consumed
return tuple(value)
@DERTag(SET, (set, frozenset), constructed=True)
class _Set:
"""A set of DER values"""
@staticmethod
def encode(value):
"""Encode a set of DER values"""
return b''.join(sorted(der_encode(item) for item in value))
@classmethod
def decode(cls, constructed, content):
"""Decode a set of DER values"""
if not constructed:
raise ASN1DecodeError('SET should always be constructed')
offset = 0
length = len(content)
value = set()
while offset < length:
# pylint: disable=unpacking-non-sequence
item, consumed = der_decode(content[offset:], partial_ok=True)
# pylint: enable=unpacking-non-sequence
value.add(item)
offset += consumed
return frozenset(value)
@DERTag(BIT_STRING)
class BitString:
"""A string of bits
This object can be initialized either with a byte string and an
optional count of the number of least-significant bits in the last
byte which should not be included in the value, or with a string
consisting only of the digits '0' and '1'.
An optional 'named' flag can also be set, indicating that the
BitString was specified with named bits, in which case the proper
DER encoding of it should strip any trailing zeroes.
"""
def __init__(self, value, unused=0, named=False):
if unused < 0 or unused > 7:
raise ASN1EncodeError('Unused bit count must be between 0 and 7')
if isinstance(value, bytes):
if unused:
if not value:
raise ASN1EncodeError('Can\'t have unused bits with empty '
'value')
elif value[-1] & ((1 << unused) - 1):
raise ASN1EncodeError('Unused bits in value should be '
'zero')
elif isinstance(value, str):
if unused:
raise ASN1EncodeError('Unused bit count should not be set '
'when providing a string')
used = len(value) % 8
unused = 8 - used if used else 0
value += unused * '0'
value = bytes(int(value[i:i+8], 2)
for i in range(0, len(value), 8))
else:
raise ASN1EncodeError('Unexpected type of bit string value')
if named:
while value and not value[-1] & (1 << unused):
unused += 1
if unused == 8:
value = value[:-1]
unused = 0
and nans in mag
# set to zero points that are not defined or inf
# and mark them on axis
isnan_arr = np.isnan(mag)
for i in range(x_len):
for j in range(y_len):
if isnan_arr[i, j]:
# colour this region as a shaded square
rect = patch.Rectangle((self.xg[i, j] - dist_points/2, self.yg[i, j] - dist_points/2), dist_points, dist_points, color='#B5B5B5')
axis.add_patch(rect)
mag[i, j] = 0
if abs(mag[i, j]) == np.inf or abs(mag[i, j]) > 1e15:
# colour this point as a big red dot
circ = patch.Circle((self.xg[i, j], self.yg[i, j]), L*self.fract/3, color='red')
axis.add_patch(circ)
mag[i, j] = 0
# #########################################################################
# use the direction of arrows to define stack properties
# #########################################################################
# define length of sheet as a fraction of total graph scale
# this also sets max, total height of stack (along its direction)
s_L = self.fract * L
# #########################################################################
# define stack based on geometrical arguments
# sheets perp. to hypothetical arrow, shifted along it
# their density proportional to mag, + arrowhead on top
# #########################################################################
# find the maximum magnitude for scaling
max_size = np.max(mag)
# set relative scaling, linear or logarithmic
if self.logarithmic_scale_bool:
mag1 = mag + 1
logmag1 = np.log(mag1)
R = logmag1/np.max(logmag1) # Re-assign R
else:
R = mag/max_size
# define trigonometric shifts
I_sin = np.sin(angles)
I_cos = np.cos(angles)
# precalculate heavy operations
# define the points that set out a line of the stack sheet (middle line)
A_x = self.xg + (s_L/2)*I_sin
A_y = self.yg - (s_L/2)*I_cos
B_x = self.xg - (s_L/2)*I_sin
B_y = self.yg + (s_L/2)*I_cos
# define points of stack arrowheads as arrays for all stacks
p_sh1x = self.xg + (s_L/2)*I_cos + (s_L*self.w_head)*I_sin
p_sh1y = self.yg + (s_L/2)*I_sin - (s_L*self.w_head)*I_cos
p_sh2x = self.xg + (s_L/2)*I_cos - (s_L*self.w_head)*I_sin
p_sh2y = self.yg + (s_L/2)*I_sin + (s_L*self.w_head)*I_cos
p_sh3x = self.xg + (s_L*0.5 + s_L*self.h_head)*I_cos
p_sh3y = self.yg + (s_L*0.5 + s_L*self.h_head)*I_sin
# special case, when there is only 1 line in the stack plot:
P_sh1x = self.xg + (s_L*self.w_head)*I_sin
P_sh1y = self.yg - (s_L*self.w_head)*I_cos
P_sh2x = self.xg - (s_L*self.w_head)*I_sin
P_sh2y = self.yg + (s_L*self.w_head)*I_cos
P_sh3x = self.xg + (s_L*self.h_head)*I_cos
P_sh3y = self.yg + (s_L*self.h_head)*I_sin
# array of number of sheets for each stack
for i in range(self.s_max - self.s_min + 1):
t = self.s_max - i
R_int[R <= t/self.s_max] = t
# loop over each coordinate plotting
for i in range(x_len):
for j in range(y_len):
# variable for the currently considered magnitude, as it is reused
# avoids extracting from R many times.
n = R_int[i, j]
# do not plot anything if magnitude is exactly zero
if mag[i,j] == 0:
continue
# deal with even number of sheets from magnitudes:
if n % 2 == 0:
# parameter to loop over in the recursion equation
s = 0
# points for sheets required for the given magnitude
# from these define all the needed lines and plot them
while s <= 0.5*(n-2): # maximum set by equations (documentation)
# define all the points for the 2 currently looped +- sheets in while loop
Ax1 = A_x[i, j] + G(s, n, 0)*s_L*I_cos[i, j]
Ay1 = A_y[i, j] + G(s, n, 0)*s_L*I_sin[i, j]
Bx1 = B_x[i, j] + G(s, n, 0)*s_L*I_cos[i, j]
By1 = B_y[i, j] + G(s, n, 0)*s_L*I_sin[i, j]
Ax2 = A_x[i, j] - G(s, n, 0)*s_L*I_cos[i, j]
Ay2 = A_y[i, j] - G(s, n, 0)*s_L*I_sin[i, j]
Bx2 = B_x[i, j] - G(s, n, 0)*s_L*I_cos[i, j]
By2 = B_y[i, j] - G(s, n, 0)*s_L*I_sin[i, j]
# from these, define the 2 lines, for this run
axis.add_line(Line2D((Ax1, Bx1), (Ay1, By1), linewidth=1, color=self.color))
axis.add_line(Line2D((Ax2, Bx2), (Ay2, By2), linewidth=1, color=self.color))
# update parameter to repeat and draw all needed arrows
s += 1
# deal with the odd number of stacks:
else:
# Add the centre line for odd numbers of stacks
axis.add_line(Line2D((A_x[i, j], B_x[i, j]), (A_y[i, j], B_y[i, j]), linewidth=1, color=self.color))
# then loop over the remaining lines as per the recursion formula:
s = 1 # exclude already completed 0
# define all remaining sheets for the magnitude:
while s <= 0.5*(n-1): # maximum set by equations (documentation)
# define all the points for the current +- displacement in while loop
Ax1 = A_x[i, j] + G(s, n, 1)*s_L*I_cos[i, j]
Ay1 = A_y[i, j] + G(s, n, 1)*s_L*I_sin[i, j]
Bx1 = B_x[i, j] + G(s, n, 1)*s_L*I_cos[i, j]
By1 = B_y[i, j] + G(s, n, 1)*s_L*I_sin[i, j]
Ax2 = A_x[i, j] - G(s, n, 1)*s_L*I_cos[i, j]
Ay2 = A_y[i, j] - G(s, n, 1)*s_L*I_sin[i, j]
Bx2 = B_x[i, j] - G(s, n, 1)*s_L*I_cos[i, j]
By2 = B_y[i, j] - G(s, n, 1)*s_L*I_sin[i, j]
# from these, define the 2 displaced lines
axis.add_line(Line2D((Ax1,Bx1),(Ay1,By1), linewidth=1, color=self.color))
axis.add_line(Line2D((Ax2,Bx2),(Ay2,By2), linewidth=1, color=self.color))
# update parameter
s += 1
# deal with arrowheads
if self.arrowheads:
# from central sheet for n=1 or on top sheet for n>1
if n > 1: # for all lines but the single sheet one
axis.add_line(Line2D((p_sh1x[i, j],p_sh3x[i, j]),(p_sh1y[i, j],p_sh3y[i, j]), linewidth=1, color = self.color))
axis.add_line(Line2D((p_sh2x[i, j], p_sh3x[i, j]), (p_sh2y[i, j], p_sh3y[i, j]), linewidth=1, color=self.color))
else:
# when only 1-sheet is drawn
axis.add_line(Line2D((P_sh1x[i, j], P_sh3x[i, j]), (P_sh1y[i, j], P_sh3y[i, j]), linewidth=1, color = self.color))
axis.add_line(Line2D((P_sh2x[i, j], P_sh3x[i, j]), (P_sh2y[i, j], P_sh3y[i, j]), linewidth=1, color=self.color))
else:
pass
# method to find its exterior derivative
def ext_d(self):
'''
ext_d()
Computes the exterior derivative and returns it
as the 2-form object
'''
if self.form_1_str_x == None or self.form_1_str_y == None:
# ERROR
raise ValueError('Error: You need to supply the 1-form equations to do this, look at \'give_eqn\' method')
else:
# the strings have been correctly given, compute the
# exterior derivative
# get the input strings for the x and y components
x_comp_str = self.form_1_str_x
y_comp_str = self.form_1_str_y
# parse the x and y component strings into sympy expressions
sympy_expr_x = parse_expr(x_comp_str, evaluate=False)
sympy_expr_y = parse_expr(y_comp_str, evaluate=False)
# combine the 2 into a list:
expressions = np.array([sympy_expr_x, sympy_expr_y])
# set up an array of coordinates that need to be used (in standard order)
coords = ['x', 'y']
# set up dimensionality
m = 2
# from these get the 2-form
result = find_2_form(expressions, coords, self.xg, self.yg, zg=None, m=m)
# format, and evaluate
# get the string of this new 2-form
form_2_str = str(simplify(result[0][0]))
# keep a local, unformatted version of this
# to supply to form_2
form_2_str_loc = form_2_str*1
# numerically evaluate it, careful about constants
# to evaluate it, make sure to use grids
form_2_str = form_2_str.replace('x', '(self.xg)')
form_2_str = form_2_str.replace('y', '(self.yg)')
if form_2_str.find('x') == -1 and form_2_str.find('y') == -1:
form_2_str = '(' + str(form_2_str) + ')* np.ones(np.shape(self.xg))'
# evaluate, set up new object and return
form_2_result = eval(form_2_str)
result_form = form_2(self.xg, self.yg, form_2_result, form_2_str_loc)
# return it to the user
return result_form
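# For reference (added comment, not in the original): for a 1-form
# w = F_x dx + F_y dy on the plane, the exterior derivative computed above is
#
#     dw = (dF_y/dx - dF_x/dy) dx ^ dy
#
# e.g. the 1-form 'x dy' (F_x = '0', F_y = 'x') gives the constant 2-form
# 1 dx ^ dy, while an exact form such as d(xy) = y dx + x dy gives 0.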
# define a function to compute the exterior derivative numerically only
def num_ext_d(self):
'''
Takes in no arguments
computes the exterior derivative numerically only
The equations do not need to be given
If given, they do not get passed onto the 2-form object anyway
NUMERICAL ONLY, they will be lost!
returns 2-form object
'''
# get steps in dx and dy:
dx = self.xg[0, :]
dy = self.yg[:, 0]
# copy F_x and F_y, locally
fx = self.F_x + np.zeros(np.shape(self.xg))
fy = self.F_y + np.zeros(np.shape(self.xg))
# clean up F_x and F_y from nan etc
for i in range(len(self.xg[:, 0])):
for j in range(len(self.yg[0, :])):
# correct for ill defined values
if isnan(fx[i, j]):
fx[i, j] = 0
if isnan(fy[i, j]):
fy[i, j] = 0
if abs(fx[i, j]) == np.inf or abs(fx[i, j]) > 1e15:
fx[i, j] = 1e10
if abs(fy[i, j]) == np.inf or abs(fy[i, j]) > 1e15:
fy[i, j] = 1e10
with the returned read value as
well as the completion:
oncomplete(completion, data_read)
:param object_name: name of the object to read from
:type object_name: str
:param length: the number of bytes to read
:type length: int
:param offset: byte offset in the object to begin reading from
:type offset: int
:param oncomplete: what to do when the read is complete
:type oncomplete: completion
:raises: :class:`Error`
:returns: completion object
"""
buf = create_string_buffer(length)
def oncomplete_(completion_v):
return_value = completion_v.get_return_value()
return oncomplete(completion_v,
ctypes.string_at(buf, return_value) if return_value >= 0 else None)
completion = self.__get_completion(oncomplete_, None)
ret = run_in_thread(self.librados.rados_aio_read,
(self.io, c_char_p(object_name),
completion.rados_comp, buf, c_size_t(length),
c_uint64(offset)))
if ret < 0:
raise make_ex(ret, "error reading %s" % object_name)
return completion
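# Illustrative usage sketch (not part of the original bindings), assuming an
# open Ioctx instance named `ioctx`:
#
#     def on_read(completion, data):
#         print("read %d bytes" % (len(data) if data is not None else -1))
#
#     comp = ioctx.aio_read("myobject", 4096, 0, on_read)
#     comp.wait_for_complete()
#
# The callback receives None for `data` when the underlying read failed.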
def aio_remove(self, object_name, oncomplete=None, onsafe=None):
"""
Asynchronously remove an object
:param object_name: name of the object to remove
:type object_name: str
:param oncomplete: what to do when the remove is safe and complete in memory
on all replicas
:type oncomplete: completion
:param onsafe: what to do when the remove is safe and complete on storage
on all replicas
:type onsafe: completion
:raises: :class:`Error`
:returns: completion object
"""
completion = self.__get_completion(oncomplete, onsafe)
ret = run_in_thread(self.librados.rados_aio_remove,
(self.io, c_char_p(object_name),
completion.rados_comp))
if ret < 0:
raise make_ex(ret, "error removing %s" % object_name)
return completion
def require_ioctx_open(self):
"""
Checks if the rados.Ioctx object state is 'open'
:raises: IoctxStateError
"""
if self.state != "open":
raise IoctxStateError("The pool is %s" % self.state)
def change_auid(self, auid):
"""
Attempt to change an io context's associated auid "owner."
Requires that you have write permission on both the current and new
auid.
:raises: :class:`Error`
"""
self.require_ioctx_open()
ret = run_in_thread(self.librados.rados_ioctx_pool_set_auid,
(self.io, ctypes.c_uint64(auid)))
if ret < 0:
raise make_ex(ret, "error changing auid of '%s' to %d" %\
(self.name, auid))
def set_locator_key(self, loc_key):
"""
Set the key for mapping objects to pgs within an io context.
The key is used instead of the object name to determine which
placement groups an object is put in. This affects all subsequent
operations of the io context - until a different locator key is
set, all objects in this io context will be placed in the same pg.
:param loc_key: the key to use as the object locator, or NULL to discard
any previously set key
:type loc_key: str
:raises: :class:`TypeError`
"""
self.require_ioctx_open()
if not isinstance(loc_key, str):
raise TypeError('loc_key must be a string')
run_in_thread(self.librados.rados_ioctx_locator_set_key,
(self.io, c_char_p(loc_key)))
self.locator_key = loc_key
def get_locator_key(self):
"""
Get the locator_key of context
:returns: locator_key
"""
return self.locator_key
def set_namespace(self, nspace):
"""
Set the namespace for objects within an io context.
The namespace in addition to the object name fully identifies
an object. This affects all subsequent operations of the io context
- until a different namespace is set, all objects in this io context
will be placed in the same namespace.
:param nspace: the namespace to use, or None/"" for the default namespace
:type nspace: str
:raises: :class:`TypeError`
"""
self.require_ioctx_open()
if nspace is None:
nspace = ""
if not isinstance(nspace, str):
raise TypeError('namespace must be a string')
run_in_thread(self.librados.rados_ioctx_set_namespace,
(self.io, c_char_p(nspace)))
self.nspace = nspace
def get_namespace(self):
"""
Get the namespace of context
:returns: namespace
"""
return self.nspace
def close(self):
"""
Close a rados.Ioctx object.
This just tells librados that you no longer need to use the io context.
It may not be freed immediately if there are pending asynchronous
requests on it, but you should not use an io context again after
calling this function on it.
"""
if self.state == "open":
self.require_ioctx_open()
run_in_thread(self.librados.rados_ioctx_destroy, (self.io,))
self.state = "closed"
def write(self, key, data, offset=0):
"""
Write data to an object synchronously
:param key: name of the object
:type key: str
:param data: data to write
:type data: str
:param offset: byte offset in the object to begin writing at
:type offset: int
:raises: :class:`TypeError`
:raises: :class:`LogicError`
:returns: int - 0 on success
"""
self.require_ioctx_open()
if not isinstance(key, str):
raise TypeError('key must be a string')
if not isinstance(data, str):
raise TypeError('data must be a string')
length = len(data)
ret = run_in_thread(self.librados.rados_write,
(self.io, c_char_p(key), c_char_p(data),
c_size_t(length), c_uint64(offset)))
if ret == 0:
return ret
elif ret < 0:
raise make_ex(ret, "Ioctx.write(%s): failed to write %s" % \
(self.name, key))
else:
raise LogicError("Ioctx.write(%s): rados_write \
returned %d, but should return zero on success." % (self.name, ret))
def write_full(self, key, data):
"""
Write an entire object synchronously.
The object is filled with the provided data. If the object exists,
it is atomically truncated and then written.
:param key: name of the object
:type key: str
:param data: data to write
:type data: str
:raises: :class:`TypeError`
:raises: :class:`Error`
:returns: int - 0 on success
"""
self.require_ioctx_open()
if not isinstance(key, str):
raise TypeError('key must be a string')
if not isinstance(data, str):
raise TypeError('data must be a string')
length = len(data)
ret = run_in_thread(self.librados.rados_write_full,
(self.io, c_char_p(key), c_char_p(data),
c_size_t(length)))
if ret == 0:
return ret
elif ret < 0:
raise make_ex(ret, "Ioctx.write_full(%s): failed to write %s" % \
(self.name, key))
else:
raise LogicError("Ioctx.write_full(%s): rados_write_full \
returned %d, but should return zero on success." % (self.name, ret))
def append(self, key, data):
"""
Append data to an object synchronously
:param key: name of the object
:type key: str
:param data: data to write
:type data: str
:raises: :class:`TypeError`
:raises: :class:`LogicError`
:returns: int - 0 on success
"""
self.require_ioctx_open()
if not isinstance(key, str):
raise TypeError('key must be a string')
if not isinstance(data, str):
raise TypeError('data must be a string')
length = len(data)
ret = run_in_thread(self.librados.rados_append,
(self.io, c_char_p(key), c_char_p(data),
c_size_t(length)))
if ret == 0:
return ret
elif ret < 0:
raise make_ex(ret, "Ioctx.append(%s): failed to append %s" % \
(self.name, key))
else:
raise LogicError("Ioctx.append(%s): rados_append \
returned %d, but should return zero on success." % (self.name, ret))
def read(self, key, length=8192, offset=0):
"""
Read data from an object synchronously
:param key: name of the object
:type key: str
:param length: the number of bytes to read (default=8192)
:type length: int
:param offset: byte offset in the object to begin reading at
:type offset: int
:raises: :class:`TypeError`
:raises: :class:`Error`
:returns: str - data read from object
"""
self.require_ioctx_open()
if not isinstance(key, str):
raise TypeError('key must be a string')
ret_buf = create_string_buffer(length)
ret = run_in_thread(self.librados.rados_read,
(self.io, c_char_p(key), ret_buf, c_size_t(length),
c_uint64(offset)))
if ret < 0:
raise make_ex(ret, "Ioctx.read(%s): failed to read %s" % (self.name, key))
return ctypes.string_at(ret_buf, ret)
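    # Illustrative round-trip sketch (added here, not part of the original
    # bindings); assumes an open Ioctx `ioctx` on a writable pool:
    #
    #   ioctx.write_full('greeting', 'hello world')   # replace the whole object
    #   ioctx.write('greeting', 'HELLO', offset=0)    # overwrite the first 5 bytes
    #   data = ioctx.read('greeting', length=32)      # -> 'HELLO world'
    #   ioctx.append('greeting', '!')                 # object is now 'HELLO world!'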
def get_stats(self):
"""
Get pool usage statistics
:returns: dict - contains the following keys:
- ``num_bytes`` (int) - size of pool in bytes
- ``num_kb`` (int) - size of pool in kbytes
- ``num_objects`` (int) - number of objects in the pool
- ``num_object_clones`` (int) - number of object clones
- ``num_object_copies`` (int) - number of object copies
          - ``num_objects_missing_on_primary`` (int) - number of objects
missing on primary
- ``num_objects_unfound`` (int) - number of unfound objects
- ``num_objects_degraded`` (int) - number of degraded objects
- ``num_rd`` (int) - bytes read
- ``num_rd_kb`` (int) - kbytes read
- ``num_wr`` (int) - bytes written
- ``num_wr_kb`` (int) - kbytes written
"""
self.require_ioctx_open()
stats = rados_pool_stat_t()
ret = run_in_thread(self.librados.rados_ioctx_pool_stat,
(self.io, byref(stats)))
if ret < 0:
raise make_ex(ret, "Ioctx.get_stats(%s): get_stats failed" % self.name)
return {'num_bytes': stats.num_bytes,
'num_kb': stats.num_kb,
'num_objects': stats.num_objects,
'num_object_clones': stats.num_object_clones,
'num_object_copies': stats.num_object_copies,
"num_objects_missing_on_primary": stats.num_objects_missing_on_primary,
"num_objects_unfound": stats.num_objects_unfound,
"num_objects_degraded": stats.num_objects_degraded,
"num_rd": stats.num_rd,
"num_rd_kb": stats.num_rd_kb,
"num_wr": stats.num_wr,
"num_wr_kb": stats.num_wr_kb }
def remove_object(self, key):
"""
Delete an object
This does not delete any snapshots of the object.
:param key: the name of the object to delete
:type key: str
:raises: :class:`TypeError`
:raises: :class:`Error`
:returns: bool - True on success
"""
self.require_ioctx_open()
if not isinstance(key, str):
raise TypeError('key must be a string')
ret = run_in_thread(self.librados.rados_remove,
(self.io, c_char_p(key)))
if ret < 0:
raise make_ex(ret, "Failed to remove '%s'" % key)
return True
def trunc(self, key, size):
"""
Resize an object
If this enlarges the object, the new area is logically filled with
zeroes. If this shrinks the object, the excess data is removed.
:param key: the name of the object to | |
import re
import ssl
import sys
import json
import base64
import urllib2
import threading
from core.alert import *
from core._time import now
from subprocess import Popen, PIPE
from core.targets import target_type
from core.log import __log_into_file
devs = {};
ipList = []
httpPort = 80
debug = 0
scanid = ''
scancmd = ''
lang = ''
loginfile = ''
devCfgUrl = ''
numOfIps = 0
TIME_OUT = 15
def readDevices():
global devs
if(devCfgUrl != ""):
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
req = urllib2.Request(devCfgUrl, headers={ 'User-Agent': 'Mozilla/5.0' })
req.get_method = lambda: 'GET'
try:
buff = urllib2.urlopen(req, context=context, timeout=TIME_OUT).read()
except:
error("Faild to read config file from: " + str(devCfgUrl))
else:
with open("lib/scan/iot/devices.cfg", "r") as f:
buff = f.read(0x1000000)
devs = json.loads(buff)
#print json.dumps(devs, indent=4)
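# Hypothetical example of a single entry in lib/scan/iot/devices.cfg, inferred
# from how search4devType() and check_login() consume the parsed JSON; the field
# values below are illustrative only and are not taken from the real config file:
#
#   {
#     "acme_cam": {
#       "devTypePattern": [["header", "Server"], ["substr", "ACME-IPCam"]],
#       "loginUrlPattern": "action=\"(login\\.cgi)\"",
#       "auth": ["form", "", "user=admin&pass=admin", "body", "regex", "logout\\.cgi"],
#       "extractFormData": ["name=\"token\" value=\"(.*?)\""]
#     }
#   }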
def search4devType(body, headers):
for e in devs.keys():
p = devs[e]['devTypePattern']
if p[0][0] == "header":
try:
tmp = headers[p[0][1]]
except:
tmp = ""
elif p[0][0] == "body":
if p[0][1] == "":
tmp = body
else:
pattern = "<" + str(p[0][1]) + ">(.*?)</" + str(p[0][1]) + ">"
match = re.search(pattern, body)
if match:
tmp = match.group(1) if match else ''
else:
continue
p = devs[e]['devTypePattern'][1]
tlen = len(p)
if p[0] == "==":
if tmp.decode('utf-8') == p[1]:
return e
elif re.match(r'^regex', p[0]):
for i in range(1, tlen + 1):
try:
pattern = p[i]
match = re.search(pattern, tmp)
if not match:
break
except:
pass
if i == tlen:
return e
elif p[0] == "substr":
try:
tmp = tmp.decode('utf-8')
except:
pass
try:
if tmp.index(p[1]) >= 0:
return e
except ValueError:
pass
return ""
def getRefreshUrl(prevUrl, body):
    newUrl = ''
    tmpBody = body
    while True:
        match = re.search(r'<META\s+[^>]*url=(.*?)>', tmpBody, re.IGNORECASE)
        if not match:
            break
        tmpBody = match.string[match.end():]
        tmp = match.group(1)
        quoted = re.match(r'^["\'](.*?)["\']', tmp)
        if quoted:
            newUrl = quoted.group(1)
            break
        bare = re.match(r'^(.*?)[>"\s]', tmp)
        if bare:
            newUrl = bare.group(1)
            break
    if newUrl != "" and newUrl != prevUrl:
        return newUrl
    else:
        return ""
def match(body):
title = ''
match = re.search('<title>(.*?)<\/title>', body)
if not match:
return ""
title = match.group()
for e in devs.keys():
patterns = devs[e]['devTypePattern']
isMatch = 1
for f in patterns:
if not re.match(f, title):
isMatch = 0
break
if isMatch:
return e
return ""
def search4login(ctx, body):
devType = match(body)
if devType == "":
error("didnot find devType for " + ctx[ip])
return
pattern = devs[devType]['loginUrlPattern']
match = re.search(pattern, body)
if match:
return match.group()
return ""
def substitute(postdata, extractedData):
    mystr = postdata
    p = extractedData
    ret = ''
    while re.search(r'\$(\d+)', mystr):
        match = re.search(r'\$(\d+)', mystr)
        ret += match.string[:match.start()] + p[int(match.group(1)) - 1]
        mystr = match.string[match.end():]
    ret += mystr
    return ret
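# Example of the placeholder substitution performed by substitute() above
# (added for illustration):
#   substitute("user=admin&token=$1&sid=$2", ["abc123", "42"])
#   -> "user=admin&token=abc123&sid=42"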
def check_login(ctx, body, port):
headersss = {}
url = composeURL(ctx, port)
dev = ctx['dev']
if dev['auth'][0] == "basic":
if dev['auth'][1] == "":
def http_get(url):
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
req = urllib2.Request(url, headers={ 'User-Agent': 'Mozilla/5.0' })
req.get_method = lambda: 'GET'
try:
res = urllib2.urlopen(req, context=context, timeout=TIME_OUT)
body = res.read()
headers = {"Status":res.getcode(), "location":res.geturl(), "Content-Length":len(body)}
for hdr in res.info():
headers.update({hdr:res.info()[hdr]})
except urllib2.HTTPError as e:
body = e.read()
headers = {"Status":e.getcode(), "location":e.geturl(), "Content-Length":len(body)}
for hdr in e.info():
headers.update({hdr:e.info()[hdr]})
except urllib2.URLError as e:
body = ''
headers = {"Status":595, "location":url, "Content-Length":len(body)}
def sub(headers):
                    status = int(headers['Status'])
if int(status) == 200:
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + " still has default password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
else:
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + " has changed password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
__log_into_file(loginfile, 'a', data, lang)
return
sub(headers)
http_get(url)
tmp = "Basic " + base64.encodestring(dev['auth'][1])
        tmp = tmp.strip()
headersss.update({"Authorization": tmp})
elif dev['auth'][0] == "form":
subType = dev['auth'][1]
postdata = dev['auth'][2]
try:
dev['extractFormData']
for e in dev['extractFormData']:
match = re.search(e, body)
                if match:
try:
ctx['extractedData']
except:
ctx['extractedData'] = []
ctx['extractedData'].append(match.group(1))
except:
pass
if re.match(r'^sub', subType):
subType = ""
postdata = substitute(postdata, ctx['extractedData'])
if subType == "":
def http_post(url, postdata, dev):
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
req = urllib2.Request(url, headers={ 'User-Agent': 'Mozilla/5.0' }, data=postdata)
req.get_method = lambda: 'POST'
try:
res = urllib2.urlopen(req, context=context, timeout=TIME_OUT)
body = res.read()
headers = {"Status":res.getcode(), "location":res.geturl(), "Content-Length":len(body)}
for hdr in res.info():
headers.update({hdr:res.info()[hdr]})
except urllib2.HTTPError as e:
body = e.read()
headers = {"Status":e.getcode(), "location":e.geturl(), "Content-Length":len(body)}
for hdr in e.info():
headers.update({hdr:e.info()[hdr]})
except urllib2.URLError as e:
body = ''
headers = {"Status":595, "location":url, "Content-Length":len(body)}
def sub1(body, headers, dev):
if dev['auth'][3] == "body":
if dev['auth'][4] == "regex":
pattern = dev['auth'][5]
if re.match(pattern, body):
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + " still has default password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
else:
resp = "device " + ctx['ip'] + " of type " + ctx['devType'] + " has changed password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
__log_into_file(loginfile, 'a', data, lang)
return
elif dev['auth'][4] == "!substr":
body = body.decode('utf-8')
                            if body.find(dev['auth'][5]) < 0:
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + " still has default password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
else:
resp = "device " + ctx['ip'] + " of type " + ctx['devType'] + " has changed password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
__log_into_file(loginfile, 'a', data, lang)
return
sub1(body, headers, dev)
http_post(url, postdata, dev)
if debug:
warn("checking login on " + url)
def http_get1(url, hdrs, ctx):
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
req = urllib2.Request(url, headers=hdrs)
req.get_method = lambda: 'GET'
try:
res = urllib2.urlopen(req, context=context, timeout=TIME_OUT)
body = res.read()
headers = {"Status":res.getcode(), "location":res.geturl(), "Content-Length":len(body)}
for hdr in res.info():
headers.update({hdr:res.info()[hdr]})
except urllib2.HTTPError as err:
body = err.read()
headers = {"Status":err.getcode(), "location":err.geturl(), "Content-Length":len(body)}
for hdr in err.info():
headers.update({hdr:err.info()[hdr]})
except urllib2.URLError as err:
body = ''
headers = {"Status":595, "location":url, "Content-Length":len(body)}
def sub2(headers, ctx):
status = int(headers['Status'])
if debug:
print "check_login status=" + str(status)
data = ''
if int(status) == 200:
if ctx['dev']['auth'][0] == "basic":
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + " still has default password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
elif ctx['dev']['auth'][0] == "expect200":
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + "does not have any password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
elif int(status) == 301 or int(status) == 302:
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + " still has default password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
elif int(status) == 401 and ctx['dev']['auth'][0] == "basic":
resp = "device " + ctx['ip'] + " is of type " + ctx['devType'] + " has changed password"
info(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
else:
resp = "device " + ctx['ip'] + ": unexpected resp code " + str(status)
error(resp)
data = json.dumps({'HOST': ctx['ip'], 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
'TYPE': 'iot_scan', 'DESCRIPTION': str(resp),
'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scanid, 'SCAN_CMD': scancmd}) + "\n"
__log_into_file(loginfile, 'a', data, lang)
return
| |
of the TIN nodes.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
To solve channel incision and landscape evolution, the algorithm follows the O(n)-efficient ordering
method from Braun and Willett (2013) and is based on a *single-flow-direction* (**SFD**) approximation
assuming that water goes down the path of the steepest slope.
.. seealso::
        Braun, J. and Willett, S. D. A very efficient O(n), implicit and parallel method to solve the stream power equation governing fluvial incision and landscape evolution. Geomorphology. 2013;180–181(Supplement C):170–179.
"""
# Call the SFD function from libUtils
# Get the directions from true surface
base1, receivers1 = sfd.directions_base(elev, neighbours, edges, distances, globalIDs)
# Send local base level globally
bpos = numpy.where(base1 >= 0)[0]
self.base1 = base1[bpos]
# Send local receivers globally
self.receivers1 = receivers1
# Get the directions from filled surface
base, receivers, maxh, maxdep = sfd.directions(fillH, elev, neighbours, edges, distances, globalIDs)
# Send local base level globally
bpos = numpy.where(base >= 0)[0]
self.base = base[bpos]
numpy.random.shuffle(self.base)
# Send local receivers globally
self.receivers = receivers
# Send local maximum height globally
self.maxh = maxh
# Send local maximum deposition globally
self.maxdep = maxdep
return
def _donors_number_array(self):
"""
Creates an array containing the number of donors for each node.
"""
self.arrdonor = None
numPts = len(self.receivers)
self.arrdonor = numpy.zeros(numPts, dtype=int)
maxID = numpy.max(self.receivers)
self.arrdonor[:(maxID+1)] = numpy.bincount(self.receivers)
self.maxdonors = self.arrdonor.max()
return
def _delta_array(self):
"""
Creates the "delta" array, which is a list containing, for each
node, the array index where that node's donor list begins.
"""
self.delta = None
nbdonors = len(self.arrdonor)
self.delta = numpy.zeros( nbdonors+1, dtype=int)
self.delta.fill(nbdonors)
self.delta[-2::-1] -= numpy.cumsum(self.arrdonor[::-1])
return
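    # Worked example of the donor bookkeeping above (added for illustration,
    # not part of the original source):
    #   receivers = [1, 1, 2, 2, 2]  ->  arrdonor = [0, 2, 3, 0, 0]
    #   delta starts as [5, 5, 5, 5, 5, 5]; subtracting the reversed cumulative
    #   sum of arrdonor gives delta = [0, 0, 2, 5, 5, 5], so the donors of node i
    #   occupy slots delta[i] .. delta[i+1]-1 of the donor array
    #   (node 0: none, node 1: slots 0-1, node 2: slots 2-4).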
def _donors_number_array1(self):
"""
Creates an array containing the number of donors for each node.
"""
self.arrdonor = None
numPts = len(self.receivers1)
self.arrdonor = numpy.zeros(numPts, dtype=int)
maxID = numpy.max(self.receivers1)
self.arrdonor[:(maxID+1)] = numpy.bincount(self.receivers1)
self.maxdonors = self.arrdonor.max()
return
def _delta_array1(self):
"""
Creates the "delta" array, which is a list containing, for each
node, the array index where that node's donor list begins.
"""
self.delta = None
nbdonors = len(self.arrdonor)
self.delta = numpy.zeros( nbdonors+1, dtype=int)
self.delta.fill(nbdonors)
self.delta[-2::-1] -= numpy.cumsum(self.arrdonor[::-1])
return
def ordered_node_array_filled(self):
"""
Creates an array of node IDs that is arranged in order from downstream
to upstream for filled surface.
"""
# Build donors array for each node
self._donors_number_array()
# Create the delta array
self._delta_array()
# Using libUtils stack create the ordered node array
self.donors,lstcks = flowalgo.build(self.localbase,self.receivers,self.delta)
# Create local stack
stids = numpy.where(lstcks > -1 )[0]
self.localstack = lstcks[stids]
return
def ordered_node_array_elev(self):
"""
Creates an array of node IDs that is arranged in order from downstream
to upstream for real surface.
"""
# Build donors array for each node
self._donors_number_array1()
# Create the delta array
self._delta_array1()
# Using libUtils stack create the ordered node array
self.donors1,lstcks = flowalgo.build(self.localbase1,self.receivers1,self.delta)
# Create local stack
stids = numpy.where(lstcks > -1 )[0]
self.localstack1 = lstcks[stids]
return
def compute_flow(self, sealevel, elev, Acell, rain):
"""
Calculates the **drainage area** and **water discharge** at each node.
        Args:
            sealevel: real value giving the sea-level height at considered time step.
            elev: numpy arrays containing the elevation of the TIN nodes.
            Acell: numpy float-type array containing the voronoi area for each nodes (in :math:`{m}^2`).
            rain: numpy float-type array containing the precipitation rate for each nodes (in :math:`{m/a}`).
"""
numPts = len(Acell)
self.discharge = numpy.zeros(numPts, dtype=float)
self.discharge[self.stack] = Acell[self.stack] * rain[self.stack]
# Compute discharge using libUtils
self.discharge, self.activelay = flowalgo.discharge(sealevel, self.localstack, self.receivers,
elev, self.discharge)
return
def view_receivers(self, fillH, elev, neighbours, edges, distances, globalIDs, sea):
"""
Single Flow Direction function computes downslope flow directions by inspecting the neighborhood
elevations around each node. The SFD method assigns a unique flow direction towards the steepest
downslope neighbor.
Args:
fillH: numpy array containing the filled elevations from Planchon & Darboux depression-less algorithm.
elev: numpy arrays containing the elevation of the TIN nodes.
neighbours: numpy integer-type array with the neighbourhood IDs.
edges: numpy real-type array with the voronoi edges length for each neighbours of the TIN nodes.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
sea: current elevation of sea level.
.. image:: img/stack2.jpg
:scale: 80 %
:alt: SFD
:align: center
Nodal representation of the landform. Nodes are indicated as black circles. The lines
represent all the possible connections among neighboring nodes. The solid lines indicate
the connections following the steepest descent hypothesis (indicated by the arrows).
.. seealso::
Braun and Willett, 2013: A very efficient O(n), implicit and parallel method to solve
the stream power equation governing fluvial incision and landscape evolution - *Geomorphology*,
170-179, `doi:10.1016/j.geomorph.2012.10.008`_.
"""
# Call the SFD function from libUtils
base, receivers = sfd.dirview(fillH, elev, neighbours, edges, distances, globalIDs, sea)
# Send local base level globally
bpos = numpy.where(base >= 0)[0]
self.base = base[bpos]
numpy.random.shuffle(self.base)
# Send local receivers globally
self.receivers = receivers
self.localbase = self.base
self.ordered_node_array_filled()
globalstack = self.localstack
self.stack = globalstack
return
def compute_parameters(self,elevation,sealevel):
"""
Calculates the catchment IDs and the Chi parameter (Willett 2014).
"""
# Get basin starting IDs for each local partition
cumbase = numpy.zeros(2)
for i in range(1):
cumbase[i+1] = len(numpy.array_split(self.base, 1)[i])+cumbase[i]+1
# Compute discharge using libUtils
idsl = numpy.where(elevation<sealevel)[0]
rcv = numpy.copy(self.receivers)
rcv[idsl] = -1
chi, basinID = flowalgo.parameters(self.localstack,rcv,
self.discharge,self.xycoords,cumbase[0])
self.chi = chi
self.basinID = numpy.copy(basinID)
self.basinID[idsl] = -1
return
def compute_parameters_depression(self, fillH, elev, Acell, sealevel, debug=False):
"""
Calculates each depression maximum deposition volume and its downstream draining node.
Args:
fillH: numpy array containing the filled elevations from Planchon & Darboux depression-less algorithm.
elev: numpy arrays containing the elevation of the TIN nodes.
Acell: numpy float-type array containing the voronoi area for each nodes (in :math:`{m}^2`)
sealevel: real value giving the sea-level height at considered time step.
"""
# Compute pit ID and volume using libUtils
pitID, pitVolume = flowalgo.basinparameters(self.localstack1,self.receivers1,
elev,fillH,Acell)
self.pitID = pitID
self.pitVolume = pitVolume
self.pitVolume[self.pitVolume<=0] = 0.
# Find the depression node IDs
pIDs = numpy.where(self.pitVolume>=0.)[0]
if(len(pIDs)>0):
xyidd = numpy.where(self.pitVolume==self.pitVolume.max())[0]
# Order the pits based on filled elevation from top to bottom
orderPits = numpy.argsort(fillH[pIDs])[::-1]
# Find the depression or edge, marine point where a given pit is draining
self.pitDrain = flowalgo.basindrainage(orderPits,self.pitID,self.receivers,pIDs,
fillH,sealevel)
self.allDrain = flowalgo.basindrainageall(orderPits,self.pitID,self.receivers,pIDs)
# Debugging plotting function
if debug:
self._visualise_draining_path(pIDs, elev, self.pitDrain, fillH, 'drain')
self._visualise_draining_path(pIDs, elev, self.allDrain, fillH, 'alldrain')
else:
self.pitDrain = -numpy.ones(len(pitID))
self.allDrain = -numpy.ones(len(pitID))
return
def compute_sedflux(self, Acell, elev, rain, fillH, dt, actlay, rockCk, rivqs,
sealevel, perc_dep, slp_cr, ngbh, verbose=False):
"""
Calculates the **sediment flux** at each node.
Args:
Acell: numpy float-type array containing the voronoi area for each nodes (in :math:`{m}^2`)
elev: numpy arrays containing the elevation of the TIN nodes.
rain: numpy float-type array containing the precipitation rate for each nodes (in :math:`{m/a}`).
fillH: numpy array containing the lake elevations.
dt: real value corresponding to the maximal stability time step.
actlay: active layer composition.
rockCk: rock erodibility values.
rivqs: numpy arrays representing the sediment fluxes from rivers.
sealevel: real value giving the sea-level height at considered time step.
slp_cr: critical slope used to force aerial deposition for alluvial plain.
perc_dep: maximum percentage of deposition at any given time interval.
Returns
-------
erosion
numpy array containing erosion thicknesses for each node of the TIN (in m).
depo
numpy array containing deposition thicknesses for each node of the TIN (in m).
sedflux
numpy array containing cumulative sediment flux on each node (in :math:`{m}^3/{m}^2`).
newdt
new time step to ensure flow computation stability.
"""
check = False
newdt = numpy.copy(dt)
if actlay is None:
sedflux = numpy.zeros((len(elev),1))
else:
sedflux = numpy.zeros((len(elev),len(rockCk)))
# Compute sediment flux using libUtils
# Stream power law
if self.spl:
if verbose:
time0 = time.clock()
time1 = time.clock()
# Find border/inside nodes
if self.mp>0.:
if self.straTIN == 1:
rp = numpy.power(rain,self.mp).reshape((len(elev),1))
eroCoeff = rockCk * rp
else:
eroCoeff = self.erodibility*numpy.power(rain,self.mp)
                    eroCoeff = eroCoeff.reshape((len(elev),1))
else:
if self.straTIN == 1:
eroCoeff = numpy.tile(rockCk,(len(elev),1))
else:
eroCoeff = self.erodibility.reshape((len(elev),1))
if actlay is None:
| |
= tuple(size)
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
for polygon in self.polygons:
flipped_polygons.append(polygon.transpose(method))
return PolygonList(flipped_polygons, size=self.size)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped_polygons = []
for polygon in self.polygons:
cropped_polygons.append(polygon.crop(box))
cropped_size = w, h
return PolygonList(cropped_polygons, cropped_size)
def resize(self, size):
resized_polygons = []
for polygon in self.polygons:
resized_polygons.append(polygon.resize(size))
resized_size = size
return PolygonList(resized_polygons, resized_size)
def to(self, *args, **kwargs):
return self
def convert_to_binarymask(self):
if len(self) > 0:
masks = torch.stack([p.convert_to_binarymask() for p in self.polygons])
else:
size = self.size
masks = torch.empty([0, size[1], size[0]], dtype=torch.uint8)
return BinaryMaskList(masks, size=self.size)
def __len__(self):
return len(self.polygons)
def __getitem__(self, item):
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
else:
# advanced indexing on a single dimension
selected_polygons = []
# if isinstance(item, torch.Tensor) and item.dtype == torch.uint8 or item.dtype == torch.bool:
if isinstance(item, torch.Tensor) and \
(item.dtype == torch.uint8 or item.dtype == torch.bool):
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
selected_polygons.append(self.polygons[i])
return PolygonList(selected_polygons, size=self.size)
def __iter__(self):
return iter(self.polygons)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
class SegmentationMask(object):
"""
This class stores the segmentations for all objects in the image.
It wraps BinaryMaskList and PolygonList conveniently.
"""
def __init__(self, instances, size, mode="poly"):
"""
Arguments:
instances: two types
(1) polygon
(2) binary mask
size: (width, height)
mode: 'poly', 'mask'. if mode is 'mask', convert mask of any format to binary mask
"""
assert isinstance(size, (list, tuple))
assert len(size) == 2
if isinstance(size[0], torch.Tensor):
assert isinstance(size[1], torch.Tensor)
size = size[0].item(), size[1].item()
assert isinstance(size[0], (int, float))
assert isinstance(size[1], (int, float))
if mode == "poly":
self.instances = PolygonList(instances, size)
elif mode == "mask":
self.instances = BinaryMaskList(instances, size)
else:
raise NotImplementedError("Unknown mode: %s" % str(mode))
self.mode = mode
self.size = tuple(size)
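    # Illustrative construction sketch (added here, not part of the original
    # class); assumes the usual COCO-style layout of one flat coordinate list per
    # polygon and one polygon list per instance:
    #
    #   poly = [[[10, 10, 30, 10, 30, 30, 10, 30]]]      # one square instance
    #   segm = SegmentationMask(poly, size=(64, 64), mode="poly")
    #   as_mask = segm.convert("mask")                   # BinaryMaskList-backed copy
    #   tensor = segm.get_mask_tensor()                  # 64x64 binary mask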
def transpose(self, method):
flipped_instances = self.instances.transpose(method)
return SegmentationMask(flipped_instances, self.size, self.mode)
def crop(self, box):
cropped_instances = self.instances.crop(box)
cropped_size = cropped_instances.size
return SegmentationMask(cropped_instances, cropped_size, self.mode)
def resize(self, size, *args, **kwargs):
resized_instances = self.instances.resize(size)
resized_size = size
return SegmentationMask(resized_instances, resized_size, self.mode)
def to(self, *args, **kwargs):
return self
def convert(self, mode):
if mode == self.mode:
return self
if mode == "poly":
converted_instances = self.instances.convert_to_polygon()
elif mode == "mask":
converted_instances = self.instances.convert_to_binarymask()
else:
raise NotImplementedError("Unknown mode: %s" % str(mode))
return SegmentationMask(converted_instances, self.size, mode)
def get_mask_tensor(self):
instances = self.instances
if self.mode == "poly":
instances = instances.convert_to_binarymask()
# If there is only 1 instance
return instances.masks.squeeze(0)
def __len__(self):
return len(self.instances)
def __getitem__(self, item):
selected_instances = self.instances.__getitem__(item)
return SegmentationMask(selected_instances, self.size, self.mode)
def __iter__(self):
self.iter_idx = 0
return self
def __next__(self):
if self.iter_idx < self.__len__():
next_segmentation = self.__getitem__(self.iter_idx)
self.iter_idx += 1
return next_segmentation
raise StopIteration()
next = __next__ # Python 2 compatibility
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.instances))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
"""
BoxList
"""
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box, such as
labels.
"""
def __init__(self, bbox, image_size, mode="xyxy"):
device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
if bbox.ndimension() != 2:
raise ValueError(
"bbox should have 2 dimensions, got {}".format(bbox.ndimension())
)
if bbox.size(-1) != 4:
raise ValueError(
"last dimension of bbox should have a "
"size of 4, got {}".format(bbox.size(-1))
)
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
def convert(self, mode):
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
# we only have two modes, so don't need to check
# self.mode
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if mode == "xyxy":
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 1
bbox = torch.cat(
(xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
def _split_into_xyxy(self):
if self.mode == "xyxy":
xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
return xmin, ymin, xmax, ymax
elif self.mode == "xywh":
TO_REMOVE = 1
xmin, ymin, w, h = self.bbox.split(1, dim=-1)
return (
xmin,
ymin,
xmin + (w - TO_REMOVE).clamp(min=0),
ymin + (h - TO_REMOVE).clamp(min=0),
)
else:
raise RuntimeError("Should not be here")
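    # Illustrative usage sketch (added here, not part of the original class):
    #
    #   boxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 30.]])
    #   blist = BoxList(boxes, image_size=(100, 80), mode="xyxy")
    #   blist.add_field("labels", torch.tensor([1, 2]))
    #   xywh = blist.convert("xywh")   # first box becomes [0, 0, 11, 11]
    #                                  # because of the TO_REMOVE = 1 convention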
def resize(self, size, *args, **kwargs):
"""
Returns a resized copy of this bounding box
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_box = self.bbox * ratio
bbox = BoxList(scaled_box, size, mode=self.mode)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox
ratio_width, ratio_height = ratios
xmin, ymin, xmax, ymax = self._split_into_xyxy()
scaled_xmin = xmin * ratio_width
scaled_xmax = xmax * ratio_width
scaled_ymin = ymin * ratio_height
scaled_ymax = ymax * ratio_height
scaled_box = torch.cat(
(scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
)
bbox = BoxList(scaled_box, size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
image_width, image_height = self.size
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat(
(transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
)
bbox = BoxList(transposed_boxes, self.size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def crop(self, box):
"""
        Crops a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
xmin, ymin, xmax, ymax = self._split_into_xyxy()
w, h = box[2] - box[0], box[3] - box[1]
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
# TODO should I filter empty boxes here?
if False:
is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
cropped_box = torch.cat(
(cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
)
bbox = BoxList(cropped_box, (w, h), mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.crop(box)
bbox.add_field(k, v)
return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device):
bbox = BoxList(self.bbox.to(device), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
v = v.to(device)
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
bbox = BoxList(self.bbox[item], self.size, self.mode)
for k, v in self.extra_fields.items():
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 1
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
return self[keep]
return self
def area(self):
| |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _triple
from torch.nn.parameter import Parameter
_BN_CLASS_MAP = {
1: nn.BatchNorm1d,
2: nn.BatchNorm2d,
3: nn.BatchNorm3d,
}
# Adopted from https://github.com/pytorch/pytorch/blob/master/torch/nn/intrinsic/qat/modules/conv_fused.py
class _ConvBnNd(nn.modules.conv._ConvNd):
_version = 2
def __init__(
self,
# ConvNd args
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
# BatchNormNd args
# num_features: out_channels
eps=1e-05,
momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None,
dim=2):
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels,
kernel_size, stride, padding, dilation,
transposed, output_padding, groups, False,
padding_mode)
assert qconfig and qconfig.weight and qconfig.bias, 'qconfig must be provided for QAT module'
self.frozen = freeze_bn if self.training else True
self.dim = dim
self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
self.weight_quantizer = qconfig.weight
self.bias_quantizer = qconfig.bias
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
# this needs to be called after reset_bn_parameters,
# as they modify the same state
if self.training:
if freeze_bn:
self.freeze_bn()
else:
self.update_bn()
else:
self.freeze_bn()
def reset_running_stats(self):
self.bn.reset_running_stats()
def reset_bn_parameters(self):
self.bn.reset_running_stats()
init.uniform_(self.bn.weight)
init.zeros_(self.bn.bias)
    # note: below is actually for conv, not BN
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def batch_stats(self, x, bias=None):
"""Get the batch mean and variance of x and updates the BatchNorm's running mean and average.
Args:
x (torch.Tensor): input batch.
bias (torch.Tensor): the bias that is to be applied to the batch.
Returns:
(mean, variance)
Note:
In case of `nn.Linear`, x may be of shape (N, C, L) or (N, L)
where N is batch size, C is number of channels, L is the features size.
The batch norm computes the stats over C in the first case or L on the second case.
The batch normalization layer is
(`nn.BatchNorm1d`)[https://pytorch.org/docs/stable/nn.html#batchnorm1d]
In case of `nn.Conv2d`, x is of shape (N, C, H, W)
where H,W are the image dimensions, and the batch norm computes the stats over C.
The batch normalization layer is
(`nn.BatchNorm2d`)[https://pytorch.org/docs/stable/nn.html#batchnorm2d]
In case of `nn.Conv3d`, x is of shape (N, C, D, H, W)
where H,W are the image dimensions, D is additional channel dimension,
and the batch norm computes the stats over C.
The batch normalization layer is
(`nn.BatchNorm3d`)[https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm3d.html#torch.nn.BatchNorm3d]
"""
channel_size = self.bn.num_features
self.bn.num_batches_tracked += 1
# Calculate current batch stats
batch_mean = x.transpose(0, 1).contiguous().view(channel_size, -1).mean(1)
# BatchNorm currently uses biased variance (without Bessel's correction) as was discussed at
# https://github.com/pytorch/pytorch/issues/1410
#
# also see the source code itself:
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Normalization.cpp#L216
batch_var = x.transpose(0, 1).contiguous().view(channel_size, -1).var(
1, unbiased=False)
# Update running stats
with torch.no_grad():
biased_batch_mean = batch_mean + (bias if bias is not None else 0)
# However - running_var is updated using unbiased variance!
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Normalization.cpp#L223
n = x.numel() / channel_size
corrected_var = batch_var * (n / float(n - 1))
momentum = self.bn.momentum
if momentum is None:
# momentum is None - we compute a cumulative moving average
# as noted in https://pytorch.org/docs/stable/nn.html#batchnorm2d
momentum = 1. / float(self.bn.num_batches_tracked)
self.bn.running_mean.mul_(1 - momentum).add_(momentum * biased_batch_mean)
self.bn.running_var.mul_(1 - momentum).add_(momentum * corrected_var)
return batch_mean, batch_var
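  # Restated update rule used above (added for illustration): with momentum m,
  #   running_mean <- (1 - m) * running_mean + m * (batch_mean + conv_bias)
  #   running_var  <- (1 - m) * running_var  + m * batch_var * n / (n - 1)
  # i.e. the running mean tracks the biased output (conv plus bias), while the
  # running variance uses the Bessel-corrected (unbiased) batch variance.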
def reset_parameters(self):
super(_ConvBnNd, self).reset_parameters()
def update_bn(self):
self.frozen = False
self.bn.training = True
return self
def merge_bn_to_conv(self):
with torch.no_grad():
# Use the same implementation in nndct_shared/optimzation/fuse_conv_bn.py
      # to make sure the test accuracy is the same as the deployable model.
gamma = self.bn.weight.detach().cpu().numpy()
beta = self.bn.bias.detach().cpu().numpy()
running_var = self.bn.running_var.detach().cpu().numpy()
running_mean = self.bn.running_mean.detach().cpu().numpy()
epsilon = self.bn.eps
scale = gamma / np.sqrt(running_var + epsilon)
offset = beta - running_mean * scale
weight = self.weight.detach().cpu().numpy()
# Conv2d
if self.dim == 2 and not self.transposed:
# OIHW -> IHWO -> OIHW
weight = np.multiply(weight.transpose(1, 2, 3, 0),
scale).transpose(3, 0, 1, 2)
# ConvTranspose2d
elif self.dim == 2 and self.transposed:
# IOHW -> IHWO -> IOHW
weight = np.multiply(weight.transpose(0, 2, 3, 1),
scale).transpose(0, 3, 1, 2)
# Conv3D
elif self.dim == 3 and not self.transposed:
weight = np.multiply(weight.transpose(1, 2, 3, 4, 0),
scale).transpose(4, 0, 1, 2, 3)
# ConvTranspose3d
elif self.dim == 3 and self.transposed:
weight = np.multiply(weight.transpose(2, 3, 4, 0, 1),
scale).transpose(3, 4, 0, 1, 2)
else:
raise RuntimeError(
'Unsupported combinations: (dim={}, transposed={})'.format(
self.dim, self.transposed))
self.weight.copy_(torch.from_numpy(weight))
bias = self.bias.detach().cpu().numpy() if self.bias is not None else 0
bias = torch.from_numpy(bias * scale + offset)
if self.bias is not None:
self.bias.copy_(bias)
else:
self.bias = nn.Parameter(bias)
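  # Folding summary (added for illustration, restating the computation above):
  #   scale  = gamma / sqrt(running_var + eps)
  #   offset = beta - running_mean * scale
  # so the merged layer computes scale * (conv(x) + b) + offset, i.e. the
  # per-output-channel weights are multiplied by scale and the new bias is
  # b * scale + offset, which matches conv followed by the frozen BatchNorm.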
def freeze_bn(self):
if self.frozen:
return
self.merge_bn_to_conv()
self.frozen = True
self.bn.training = False
def clear_non_native_bias(self):
if self.bias is None:
print('WARNING: No bias to unmerge')
return
with torch.no_grad():
gamma = self.bn.weight.detach().cpu().numpy()
beta = self.bn.bias.detach().cpu().numpy()
running_var = self.bn.running_var.detach().cpu().numpy()
running_mean = self.bn.running_mean.detach().cpu().numpy()
epsilon = self.bn.eps
scale = gamma / np.sqrt(running_var + epsilon)
bias = self.bias.detach().cpu().numpy()
beta = torch.from_numpy(bias * scale + beta)
self.bn.bias.copy_(beta)
self.bias = None
def broadcast_correction(self, c):
"""Broadcasts a correction factor to the output for elementwise operations.
Two tensors are “broadcastable” if the following rules hold:
- Each tensor has at least one dimension.
- When iterating over the dimension sizes, starting at the trailing
dimension, the dimension sizes must either be equal,
one of them is 1, or one of them does not exist.
See https://pytorch.org/docs/stable/notes/broadcasting.html
"""
expected_output_dim = self.dim + 2
view_fillers_dim = expected_output_dim - c.dim() - 1
view_filler = (1,) * view_fillers_dim
expected_view_shape = c.shape + view_filler
return c.view(*expected_view_shape)
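  # Shape example for the view above (added for illustration): for a Conv2d
  # (dim=2) the output is NCHW, so a per-channel correction c of shape (C,) is
  # viewed as (C, 1, 1) and broadcasts over H and W when applied to the output.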
def broadcast_correction_weight(self, c):
"""Broadcasts a correction factor to the weight."""
if c.dim() != 1:
raise ValueError("Correction factor needs to have a single dimension")
expected_weight_dim = self.dim + 2
view_fillers_dim = expected_weight_dim - c.dim()
view_filler = (1,) * view_fillers_dim
expected_view_shape = c.shape + view_filler
return c.view(*expected_view_shape)
def extra_repr(self):
return super(_ConvBnNd, self).extra_repr()
def forward(self, x, output_size=None):
gamma, beta = self.bn.weight, self.bn.bias
if self.frozen:
quantized_weight = self.weight_quantizer(self.weight)
quantized_bias = self.bias_quantizer(self.bias)
return self._conv_forward(x, quantized_weight, quantized_bias,
output_size)
if self.training:
batch_mean, batch_var = self.batch_stats(
self._conv_forward(x, self.weight, output_size=output_size),
self.bias)
recip_sigma_batch = torch.rsqrt(batch_var + self.bn.eps)
with torch.no_grad():
sigma_running = torch.sqrt(self.bn.running_var + self.bn.eps)
w_corrected = self.weight * self.broadcast_correction_weight(
gamma / sigma_running)
w_quantized = self.weight_quantizer(w_corrected)
recip_c = self.broadcast_correction(sigma_running * recip_sigma_batch)
bias_corrected = beta - gamma * batch_mean * recip_sigma_batch
bias_quantized = self.broadcast_correction(
self.bias_quantizer(bias_corrected))
y = self._conv_forward(x, w_quantized, None, output_size)
y.mul_(recip_c).add_(bias_quantized)
else:
with torch.no_grad():
recip_sigma_running = torch.rsqrt(self.bn.running_var + self.bn.eps)
w_corrected = self.weight * self.broadcast_correction_weight(
gamma * recip_sigma_running)
w_quantized = self.weight_quantizer(w_corrected)
corrected_mean = self.bn.running_mean - (
self.bias if self.bias is not None else 0)
bias_corrected = beta - gamma * corrected_mean * recip_sigma_running
bias_quantized = self.bias_quantizer(bias_corrected)
y = self._conv_forward(x, w_quantized, bias_quantized, output_size)
return y
def train(self, mode=True):
"""Batchnorm's training behavior is using the self.training flag. Prevent
changing it if BN is frozen. This makes sure that calling `model.train()`
on a model with a frozen BN will behave properly.
"""
if mode and self.frozen:
raise RuntimeError('Training after freezing BN is not supported yet.')
self.training = mode
for module in self.children():
module.train(mode)
return self
@property
def is_quantized(self):
return True
@classmethod
def from_float(cls, conv, bn, qconfig):
"""Create a qat module from a float module."""
assert qconfig, 'Input float module must have a valid qconfig'
convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,
conv.stride, | |
form a balanced incomplete block design.
Now, considering a multiplicative generator `z` of `GF(q^{d+1})`, we get a
transitive action of a cyclic group on our projective plane from which it is
possible to build a difference set.
The construction is given in details in [Stinson2004]_, section 3.3.
EXAMPLES::
sage: from sage.combinat.designs.difference_family import singer_difference_set, is_difference_family
sage: G,D = singer_difference_set(3,2)
sage: is_difference_family(G,D,verbose=True)
It is a (13,4,1)-difference family
True
sage: G,D = singer_difference_set(4,2)
sage: is_difference_family(G,D,verbose=True)
It is a (21,5,1)-difference family
True
sage: G,D = singer_difference_set(3,3)
sage: is_difference_family(G,D,verbose=True)
It is a (40,13,4)-difference family
True
sage: G,D = singer_difference_set(9,3)
sage: is_difference_family(G,D,verbose=True)
It is a (820,91,10)-difference family
True
"""
q = Integer(q)
assert q.is_prime_power()
assert d >= 2
from sage.rings.finite_rings.finite_field_constructor import GF
from sage.rings.finite_rings.conway_polynomials import conway_polynomial
from sage.rings.finite_rings.integer_mod_ring import Zmod
# build a polynomial c over GF(q) such that GF(q)[x] / (c(x)) is a
# GF(q**(d+1)) and such that x is a multiplicative generator.
p,e = q.factor()[0]
c = conway_polynomial(p,e*(d+1))
if e != 1: # i.e. q is not a prime, so we factorize c over GF(q) and pick
# one of its factor
K = GF(q,'z')
c = c.change_ring(K).factor()[0][0]
else:
K = GF(q)
z = c.parent().gen()
# Now we consider the GF(q)-subspace V spanned by (1,z,z^2,...,z^(d-1)) inside
# GF(q^(d+1)). The multiplication by z is an automorphism of the
# GF(q)-projective space built from GF(q^(d+1)). The difference family is
# obtained by taking the integers i such that z^i belong to V.
powers = [0]
i = 1
x = z
k = (q**d-1)//(q-1)
while len(powers) < k:
if x.degree() <= (d-1):
powers.append(i)
x = (x*z).mod(c)
i += 1
return Zmod((q**(d+1)-1)//(q-1)), [powers]
def df_q_6_1(K, existence=False, check=True):
r"""
Return a `(q,6,1)`-difference family over the finite field `K`.
The construction uses Theorem 11 of [Wi72]_.
EXAMPLES::
sage: from sage.combinat.designs.difference_family import is_difference_family, df_q_6_1
sage: prime_powers = [v for v in range(31,500,30) if is_prime_power(v)]
sage: parameters = [v for v in prime_powers if df_q_6_1(GF(v,'a'), existence=True)]
sage: parameters
[31, 151, 181, 211, 241, 271, 331, 361, 421]
sage: for v in parameters:
....: K = GF(v, 'a')
....: df = df_q_6_1(K, check=True)
....: assert is_difference_family(K, df, v, 6, 1)
.. TODO::
Do improvements due to Zhen and Wu 1999.
"""
v = K.cardinality()
x = K.multiplicative_generator()
one = K.one()
if v % 30 != 1:
if existence:
return False
raise EmptySetError("k(k-1)=30 should divide (v-1)")
t = (v-1) // 30 # number of blocks
r = x**((v-1)//3) # primitive cube root of unity
r2 = r*r # the other primitive cube root
# we now compute the cosets of x**i
xx = x**5
to_coset = {x**i * xx**j: i for i in range(5) for j in range((v-1)/5)}
for c in to_coset: # the loop runs through all nonzero elements of K
if c == one or c == r or c == r2:
continue
if len(set(to_coset[elt] for elt in (r-one, c*(r-one), c-one, c-r, c-r**2))) == 5:
if existence:
return True
B = [one,r,r2,c,c*r,c*r2]
D = [[xx**i * b for b in B] for i in range(t)]
break
else:
if existence:
return Unknown
raise NotImplementedError("Wilson construction failed for v={}".format(v))
if check and not is_difference_family(K, D, v, 6, 1):
raise RuntimeError("Wilson 1972 construction failed! Please e-mail <EMAIL>")
return D
def radical_difference_set(K, k, l=1, existence=False, check=True):
r"""
Return a difference set made of a cyclotomic coset in the finite field
``K`` and with parameters ``k`` and ``l``.
Most of these difference sets appear in chapter VI.18.48 of the Handbook of
combinatorial designs.
EXAMPLES::
sage: from sage.combinat.designs.difference_family import radical_difference_set
sage: D = radical_difference_set(GF(7), 3, 1); D
[[1, 2, 4]]
sage: sorted(x-y for x in D[0] for y in D[0] if x != y)
[1, 2, 3, 4, 5, 6]
sage: D = radical_difference_set(GF(16,'a'), 6, 2)
sage: sorted(x-y for x in D[0] for y in D[0] if x != y)
[1,
1,
a,
a,
a + 1,
a + 1,
a^2,
a^2,
...
a^3 + a^2 + a + 1,
a^3 + a^2 + a + 1]
sage: for k in range(2,50):
....: for l in reversed(divisors(k*(k-1))):
....: v = k*(k-1)//l + 1
....: if is_prime_power(v) and radical_difference_set(GF(v,'a'),k,l,existence=True):
....: _ = radical_difference_set(GF(v,'a'),k,l)
....: print("{:3} {:3} {:3}".format(v,k,l))
3 2 1
4 3 2
7 3 1
5 4 3
7 4 2
13 4 1
11 5 2
7 6 5
11 6 3
16 6 2
8 7 6
9 8 7
19 9 4
37 9 2
73 9 1
11 10 9
19 10 5
23 11 5
13 12 11
23 12 6
27 13 6
27 14 7
16 15 14
31 15 7
...
41 40 39
79 40 20
83 41 20
43 42 41
83 42 21
47 46 45
49 48 47
197 49 12
"""
v = K.cardinality()
if l*(v-1) != k*(k-1):
if existence:
return False
raise EmptySetError("l*(v-1) is not equal to k*(k-1)")
# trivial case
if (v-1) == k:
if existence:
return True
add_zero = False
# q = 3 mod 4
elif v%4 == 3 and k == (v-1)//2:
if existence:
return True
add_zero = False
# q = 3 mod 4
elif v%4 == 3 and k == (v+1)//2:
if existence:
return True
add_zero = True
# q = 4t^2 + 1, t odd
elif v%8 == 5 and k == (v-1)//4 and arith.is_square((v-1)//4):
if existence:
return True
add_zero = False
# q = 4t^2 + 9, t odd
elif v%8 == 5 and k == (v+3)//4 and arith.is_square((v-9)//4):
if existence:
return True
add_zero = True
# exceptional case 1
elif (v,k,l) == (16,6,2):
if existence:
return True
add_zero = True
# exceptional case 2
elif (v,k,l) == (73,9,1):
if existence:
return True
add_zero = False
# are there more ??
else:
x = K.multiplicative_generator()
D = K.cyclotomic_cosets(x**((v-1)//k), [K.one()])
if is_difference_family(K, D, v, k, l):
print("** You found a new example of radical difference set **\n"\
"** for the parameters (v,k,l)=({},{},{}). **\n"\
"** Please contact sage-<EMAIL> **\n".format(v, k, l))
if existence:
return True
add_zero = False
else:
D = K.cyclotomic_cosets(x**((v-1)//(k-1)), [K.one()])
D[0].insert(0,K.zero())
if is_difference_family(K, D, v, k, l):
print("** You found a new example of radical difference set **\n"\
"** for the parameters (v,k,l)=({},{},{}). **\n"\
"** Please contact <EMAIL> **\n".format(v, k, l))
if existence:
return True
add_zero = True
elif existence:
return False
else:
raise EmptySetError("no radical difference set exist "
"for the parameters (v,k,l) = ({},{},{}".format(v,k,l))
x = K.multiplicative_generator()
if add_zero:
r = x**((v-1)//(k-1))
D = K.cyclotomic_cosets(r, [K.one()])
D[0].insert(0, K.zero())
else:
r = x**((v-1)//k)
D = K.cyclotomic_cosets(r, [K.one()])
if check and not is_difference_family(K, D, v, k, l):
raise RuntimeError("Sage tried to build a radical difference set with "
"parameters ({},{},{}) but it seems that it failed! Please "
"e-mail <EMAIL>".format(v,k,l))
return D
def one_cyclic_tiling(A,n):
r"""
Given a subset ``A`` of the cyclic additive group `G = Z / nZ` return
another subset `B` so that `A + B = G` and `|A| |B| = n` (i.e. any element
of `G` is uniquely expressed as a sum `a+b` with `a` in `A` and `b` in `B`).
EXAMPLES::
sage: from sage.combinat.designs.difference_family import one_cyclic_tiling
sage: tile = [0,2,4]
sage: m = one_cyclic_tiling(tile,6); m
[0, 3]
sage: sorted((i+j)%6 for i in tile for j in m)
[0, 1, 2, 3, 4, 5]
sage: def print_tiling(tile, translat, n):
....: for x in translat:
....: print(''.join('X' if (i-x)%n in tile else '.' for i in range(n)))
sage: tile = [0, 1, 2, 7]
sage: m = one_cyclic_tiling(tile, 12)
sage: print_tiling(tile, m, 12)
XXX....X....
....XXX....X
...X....XXX.
sage: tile = [0, 1, 5]
sage: m = one_cyclic_tiling(tile, 12)
sage: print_tiling(tile, m, 12)
XX...X......
...XX...X...
......XX...X
..X......XX.
sage: tile = [0, 2]
sage: m = one_cyclic_tiling(tile, 8)
sage: print_tiling(tile, m, 8)
X.X.....
....X.X.
.X.X....
.....X.X
ALGORITHM:
Uses dancing links :mod:`sage.combinat.dlx`
"""
    # we first try a naive approach which corresponds to what Wilson used in his
| |
DB
Args:
        user_email (str): user's fence email
    Returns:
        fence.models.User: the user with the given email, or None if not found
"""
session = get_db_session(db)
user = (session.query(User).filter(User.email == user_email)).first()
return user
def user_has_access_to_project(user, project_id, db=None):
"""
Return True IFF user has access to provided project auth_id
Args:
user (fence.model.User): user to check access
project_id (string): project auth_id
db (str): database connection string
Returns:
bool: True IFF user has access to provided project auth_id
"""
session = get_db_session(db)
access_privilege = (
session.query(AccessPrivilege)
.filter(AccessPrivilege.user_id == user.id)
.filter(AccessPrivilege.project_id == project_id)
).first()
return bool(access_privilege)
def do_all_users_have_access_to_project(users, project_id, db=None):
session = get_db_session(db)
# users will be list of fence.model.User's
# check if all users has access to a project with project_id
for user in users:
access_privilege = (
session.query(AccessPrivilege)
.filter(AccessPrivilege.user_id == user.id)
.filter(AccessPrivilege.project_id == project_id)
).first()
if not access_privilege:
project = (session.query(Project).filter(Project.id == project_id)).first()
project_rep = project.auth_id if project else project_id
logger.info(
"User ({}) does not have access to project ({}). There may be other "
"users that do not have access to this project.".format(
user.username.lower(), project_rep
)
)
return False
return True
def patch_user_service_account(
google_project_id, service_account_email, project_access, db=None
):
"""
Update user service account which includes
- Add and remove project access and bucket groups to/from fence db
- Add and remove access members to/from google access group
Args:
google_project_id (str): google project id
service_account_email (str): service account email
project_access (List(str)): list of projects
db(str): db connection string
Returns:
None
"""
session = get_db_session(db)
service_account = (
session.query(UserServiceAccount).filter_by(email=service_account_email).first()
)
if not service_account:
raise fence.errors.NotFound(
"{} does not exist in DB".format(service_account_email)
)
accessed_project_ids = {
ap.project_id
for ap in (
session.query(ServiceAccountAccessPrivilege)
.filter_by(service_account_id=service_account.id)
.all()
)
}
granting_project_ids = get_project_ids_from_project_auth_ids(
session, project_access
)
to_add = set.difference(granting_project_ids, accessed_project_ids)
to_delete = set.difference(accessed_project_ids, granting_project_ids)
_revoke_user_service_account_from_google(
session, to_delete, google_project_id, service_account
)
# Use granting_project_ids here, not to_add, because the google-delete-expired-service-account
# job doesn't clean out the entries in the ServiceAccountAccessPrivilege table.
# So the set diff (=to_add) won't include the proj if the SA was previously registered for that proj,
# even if the SA later expired and was removed from the relevant GBAG.
add_user_service_account_to_google(
session, granting_project_ids, google_project_id, service_account
)
_revoke_user_service_account_from_db(session, to_delete, service_account)
# On the other hand, use to_add here and not granting_project_ids,
# otherwise this will add duplicates to ServiceAccountAccessPrivilege.
# Because at time of writing, aforementioned tbl has no compound unique constraint.
add_user_service_account_to_db(session, to_add, service_account)
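# Illustrative sketch (not part of the original module) of the set bookkeeping
# above, using hypothetical project ids:
#   accessed_project_ids = {1, 2}   # rows currently in ServiceAccountAccessPrivilege
#   granting_project_ids = {2, 3}   # projects requested in this PATCH
#   to_add    = granting - accessed = {3}   -> new db rows only
#   to_delete = accessed - granting = {1}   -> revoked from Google and from the db
# Google group membership is (re)applied for all of {2, 3}, so a previously
# expired registration for project 2 is restored rather than skipped.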
def get_project_ids_from_project_auth_ids(session, auth_ids):
"""
Return the Project.id's for the given list of Project.auth_id's
Args:
auth_ids (set(str)): list of project auth ids
"""
project_ids = set()
for project_auth_id in auth_ids:
project = session.query(Project).filter_by(auth_id=project_auth_id).first()
if not project:
raise fence.errors.NotFound(
"There is no {} in Fence db".format(project_auth_id)
)
project_ids.add(project.id)
return project_ids
def _force_remove_service_account_from_access_db(
service_account, access_groups, db=None
):
"""
Remove the service account's access from the db.
Args:
service_account (UserServiceAccount): service account whose access to remove
access_groups (list(GoogleBucketAccessGroup)): bucket access groups to revoke
db(str): db connection string
Returns:
None
Restrictions:
service account has to exist in db
"""
session = get_db_session(db)
for bucket_access_group in access_groups:
sa_to_group = (
session.query(ServiceAccountToGoogleBucketAccessGroup)
.filter_by(
service_account_id=service_account.id,
access_group_id=bucket_access_group.id,
)
.first()
)
if sa_to_group:
session.delete(sa_to_group)
session.commit()
# delete all access privileges
access_privileges = (
session.query(ServiceAccountAccessPrivilege)
.filter_by(service_account_id=service_account.id)
.all()
)
for access in access_privileges:
session.delete(access)
session.commit()
def force_remove_service_account_from_access(
service_account_email, google_project_id, db=None
):
"""
Remove a service account's access entirely: loop through its bucket access
groups, remove the account from each Google group, and delete the
corresponding entries from the db.
Args:
service_account_email (str): service account email
google_project_id (str): google project id
db (None, str): Optional db connection string
Returns:
None
"""
session = get_db_session(db)
service_account = (
session.query(UserServiceAccount).filter_by(email=service_account_email).first()
)
if not service_account:
raise fence.errors.NotFound(
"{} does not exist in DB".format(service_account_email)
)
access_groups = get_google_access_groups_for_service_account(service_account)
for bucket_access_group in access_groups:
try:
with GoogleCloudManager(google_project_id, use_default=False) as g_manager:
g_manager.remove_member_from_group(
member_email=service_account.email,
group_id=bucket_access_group.email,
)
except Exception as exc:
raise GoogleAPIError(
"Can not remove member {} from access group {}. Detail {}".format(
service_account.email, bucket_access_group.email, exc
)
)
_force_remove_service_account_from_access_db(service_account, access_groups, db)
def force_remove_service_account_from_db(service_account_email, db=None):
"""
remove service account from user_service_account table
Args:
service_account_email(str): service account to be removed from db
db(None, str): Optional db connection string
"""
session = get_db_session(db)
service_account = (
session.query(UserServiceAccount).filter_by(email=service_account_email).first()
)
if not service_account:
logger.info(
"Service account ({}) requested for removal from database "
"was not found in the database.".format(service_account_email)
)
else:
session.delete(service_account)
session.commit()
return
def _revoke_user_service_account_from_google(
session, to_delete_project_ids, google_project_id, service_account
):
"""
revoke service account from google access group
Args:
session(current_session): db session
to_delete_project_ids (List(str)): list of project ids
google_project_id (str): google project id
service_account (UserServiceAccount): user service account
Returns:
None
"""
for project_id in to_delete_project_ids:
access_groups = _get_google_access_groups(session, project_id)
for access_group in access_groups:
try:
# TODO: Need to remove outer try/catch after major refactor
with GoogleCloudManager(
google_project_id, use_default=False
) as g_manager:
if not g_manager.remove_member_from_group(
member_email=service_account.email, group_id=access_group.email
):
logger.debug(
"Removed {} from google group {}".format(
service_account.email, access_group.email
)
)
else:
raise GoogleAPIError("Can not remove {} from group {}")
except Exception as exc:
raise GoogleAPIError(
"Can not remove {} from group {}. Detail {}".format(
service_account.email, access_group.email, exc
)
)
def add_user_service_account_to_google(
session, to_add_project_ids, google_project_id, service_account
):
"""
Add service account to Google access groups
Args:
session(current_session): db session
to_add_project_ids (List(int)): list of project ids
google_project_id (str): google project id
service_account (UserServiceAccount): user service account
"""
logger.debug(
"attempting to add {} to groups for projects: {}".format(
service_account, to_add_project_ids
)
)
for project_id in to_add_project_ids:
access_groups = _get_google_access_groups(session, project_id)
logger.debug(
"google group(s) for project {}: {}".format(project_id, access_groups)
)
for access_group in access_groups:
try:
# TODO: Need to remove try/catch after major refactor
with GoogleCloudManager(
google_project_id, use_default=False
) as g_manager:
response = g_manager.add_member_to_group(
member_email=service_account.email, group_id=access_group.email
)
if response.get("email", None):
logger.debug(
"Successfully add member {} to Google group {}.".format(
service_account.email, access_group.email
)
)
else:
raise GoogleAPIError(
"Can not add {} to Google group {}".format(
service_account.email, access_group.email
)
)
except Exception as exc:
raise GoogleAPIError(
"Can not add {} to Google group {}. Detail {}".format(
service_account.email, access_group.email, exc
)
)
def _revoke_user_service_account_from_db(
session, to_delete_project_ids, service_account
):
"""
Remove service account from GoogleBucketAccessGroup
Args:
session(current_session): db session
to_delete_project_ids(List(int)): list of project ids to revoke
service_account(UserServiceAccount): user service account
Returns:
None
"""
for project_id in to_delete_project_ids:
access_project = (
session.query(ServiceAccountAccessPrivilege)
.filter_by(project_id=project_id, service_account_id=service_account.id)
.first()
)
session.delete(access_project)
access_groups = _get_google_access_groups(session, project_id)
for access_group in access_groups:
account_access_bucket_group = (
session.query(ServiceAccountToGoogleBucketAccessGroup)
.filter_by(
service_account_id=service_account.id,
access_group_id=access_group.id,
)
.first()
)
if account_access_bucket_group:
session.delete(account_access_bucket_group)
session.commit()
def add_user_service_account_to_db(session, to_add_project_ids, service_account):
"""
Add user service account to service account
access privilege and service account bucket access group
Args:
session(current_session): db session
to_add_project_ids(List(int)): list of project ids
service_account(UserServiceAccount): user service account
Note: the bucket-access expiration is read from the request's expires_in
query parameter; values above the configured maximum are not honored
Returns:
None
Constraints:
The service account is not in DB yet
"""
for project_id in to_add_project_ids:
session.add(
ServiceAccountAccessPrivilege(
project_id=project_id, service_account_id=service_account.id
)
)
access_groups = _get_google_access_groups(session, project_id)
# time until the SA will lose bucket access
# by default: use configured time or 7 days
default_expires_in = config.get(
"GOOGLE_USER_SERVICE_ACCOUNT_ACCESS_EXPIRES_IN", 604800
)
# use expires_in from request query params if it was provided and
# it was not greater than the default
expires_in = get_valid_expiration_from_request(
max_limit=default_expires_in,
default=default_expires_in,
)
# convert expires_in to timestamp
expiration_time = int(time.time() + expires_in)
for access_group in access_groups:
sa_to_group = ServiceAccountToGoogleBucketAccessGroup(
service_account_id=service_account.id,
expires=expiration_time,
access_group_id=access_group.id,
)
session.add(sa_to_group)
session.commit()
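# Example of the expiration math above (hypothetical values): with the default
# GOOGLE_USER_SERVICE_ACCOUNT_ACCESS_EXPIRES_IN of 604800 s (7 days) and no
# expires_in query parameter, expiration_time == int(time.time() + 604800);
# a requested expires_in above the configured maximum is not honored.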
def get_registered_service_account_from_email(service_account_email):
"""
Return the registered UserServiceAccount with the given email, if any
"""
session = get_db_session()
return (
session.query(UserServiceAccount).filter_by(email=service_account_email).first()
)
def get_google_project_from_user_managed_service_account_email(service_account_email):
"""
Parse email to get google project id for a User-Managed service account
"""
words = service_account_email.split("@")
return words[1].split(".")[0]
def get_service_account_email(id_from_url):
"""
Return the service account email by URL-decoding the id taken from the url.
"""
return unquote(id_from_url)
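# Illustrative example (hypothetical address):
#   unquote("my-sa%40my-project.iam.gserviceaccount.com") -> "my-sa@my-project.iam.gserviceaccount.com"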
def _get_google_access_groups(session, project_id):
"""
Get google access groups
Args:
session(current_session): db session
project_id (int): project id in db
Returns:
set(GoogleBucketAccessGroup)
"""
access_groups = set()
project = session.query(Project).filter_by(id=project_id).first()
for bucket in project.buckets:
groups = bucket.google_bucket_access_groups
access_groups.update(groups)
return access_groups
def extend_service_account_access(service_account_email, db=None):
"""
Extend the Google service account's access to data by extending the
expiration time for each of the Google Bucket Access Groups it's in.
WARNING: This does NOT do any AuthZ; perform authorization checks before calling this.
Args:
service_account_email (str): service account email
db(str): db connection string
"""
session = get_db_session(db)
service_account = (
session.query(UserServiceAccount).filter_by(email=service_account_email).first()
)
if service_account:
bucket_access_groups = get_google_access_groups_for_service_account(
service_account
)
# timestamp at which the SA will lose bucket access
# by default: use configured | |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests common to all coder implementations."""
# pytype: skip-file
from __future__ import absolute_import
import collections
import enum
import logging
import math
import unittest
from builtins import range
from typing import Any
from typing import List
from typing import NamedTuple
import pytest
from apache_beam.coders import proto2_coder_test_messages_pb2 as test_message
from apache_beam.coders import coders
from apache_beam.coders import typecoders
from apache_beam.internal import pickler
from apache_beam.runners import pipeline_context
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.transforms.window import GlobalWindow
from apache_beam.typehints import sharded_key_type
from apache_beam.typehints import typehints
from apache_beam.utils import timestamp
from apache_beam.utils import windowed_value
from apache_beam.utils.sharded_key import ShardedKey
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from . import observable
try:
import dataclasses
except ImportError:
dataclasses = None # type: ignore
MyNamedTuple = collections.namedtuple('A', ['x', 'y'])
MyTypedNamedTuple = NamedTuple('MyTypedNamedTuple', [('f1', int), ('f2', str)])
class MyEnum(enum.Enum):
E1 = 5
E2 = enum.auto()
E3 = 'abc'
MyIntEnum = enum.IntEnum('MyIntEnum', 'I1 I2 I3')
MyIntFlag = enum.IntFlag('MyIntFlag', 'F1 F2 F3')
MyFlag = enum.Flag('MyFlag', 'F1 F2 F3') # pylint: disable=too-many-function-args
class DefinesGetState:
def __init__(self, value):
self.value = value
def __getstate__(self):
return self.value
def __eq__(self, other):
return type(other) is type(self) and other.value == self.value
class DefinesGetAndSetState(DefinesGetState):
def __setstate__(self, value):
self.value = value
# Defined out of line for picklability.
class CustomCoder(coders.Coder):
def encode(self, x):
return str(x + 1).encode('utf-8')
def decode(self, encoded):
return int(encoded) - 1
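# Illustrative round trip for the coder above (not part of the original tests):
#   CustomCoder().encode(1)    == b'2'
#   CustomCoder().decode(b'2') == 1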
if dataclasses is not None:
@dataclasses.dataclass(frozen=True)
class FrozenDataClass:
a: Any
b: int
@dataclasses.dataclass
class UnFrozenDataClass:
x: int
y: int
# These tests need to all be run in the same process due to the asserts
# in tearDownClass.
@pytest.mark.no_xdist
class CodersTest(unittest.TestCase):
# These class methods ensure that we test each defined coder in both
# nested and unnested context.
# Common test values representing Python's built-in types.
test_values_deterministic: List[Any] = [
None,
1,
-1,
1.5,
b'str\0str',
u'unicode\0\u0101',
(),
(1, 2, 3),
[],
[1, 2, 3],
True,
False,
]
test_values = test_values_deterministic + [
dict(),
{
'a': 'b'
},
{
0: dict(), 1: len
},
set(),
{'a', 'b'},
len,
]
@classmethod
def setUpClass(cls):
cls.seen = set()
cls.seen_nested = set()
@classmethod
def tearDownClass(cls):
standard = set(
c for c in coders.__dict__.values() if isinstance(c, type) and
issubclass(c, coders.Coder) and 'Base' not in c.__name__)
standard -= set([
coders.Coder,
coders.AvroGenericCoder,
coders.DeterministicProtoCoder,
coders.FastCoder,
coders.ListLikeCoder,
coders.ProtoCoder,
coders.ToBytesCoder
])
cls.seen_nested -= set([coders.ProtoCoder, CustomCoder])
assert not standard - cls.seen, str(standard - cls.seen)
assert not cls.seen_nested - standard, str(cls.seen_nested - standard)
@classmethod
def _observe(cls, coder):
cls.seen.add(type(coder))
cls._observe_nested(coder)
@classmethod
def _observe_nested(cls, coder):
if isinstance(coder, coders.TupleCoder):
for c in coder.coders():
cls.seen_nested.add(type(c))
cls._observe_nested(c)
def check_coder(self, coder, *values, **kwargs):
context = kwargs.pop('context', pipeline_context.PipelineContext())
test_size_estimation = kwargs.pop('test_size_estimation', True)
assert not kwargs
self._observe(coder)
for v in values:
self.assertEqual(v, coder.decode(coder.encode(v)))
if test_size_estimation:
self.assertEqual(coder.estimate_size(v), len(coder.encode(v)))
self.assertEqual(
coder.estimate_size(v), coder.get_impl().estimate_size(v))
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(v),
(coder.get_impl().estimate_size(v), []))
copy1 = pickler.loads(pickler.dumps(coder))
copy2 = coders.Coder.from_runner_api(coder.to_runner_api(context), context)
for v in values:
self.assertEqual(v, copy1.decode(copy2.encode(v)))
if coder.is_deterministic():
self.assertEqual(copy1.encode(v), copy2.encode(v))
def test_custom_coder(self):
self.check_coder(CustomCoder(), 1, -10, 5)
self.check_coder(
coders.TupleCoder((CustomCoder(), coders.BytesCoder())), (1, b'a'),
(-10, b'b'), (5, b'c'))
def test_pickle_coder(self):
coder = coders.PickleCoder()
self.check_coder(coder, *self.test_values)
def test_deterministic_coder(self):
coder = coders.FastPrimitivesCoder()
deterministic_coder = coders.DeterministicFastPrimitivesCoder(coder, 'step')
self.check_coder(deterministic_coder, *self.test_values_deterministic)
for v in self.test_values_deterministic:
self.check_coder(coders.TupleCoder((deterministic_coder, )), (v, ))
self.check_coder(
coders.TupleCoder(
(deterministic_coder, ) * len(self.test_values_deterministic)),
tuple(self.test_values_deterministic))
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, dict())
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, [1, dict()])
self.check_coder(
coders.TupleCoder((deterministic_coder, coder)), (1, dict()),
('a', [dict()]))
self.check_coder(deterministic_coder, test_message.MessageA(field1='value'))
self.check_coder(
deterministic_coder, [MyNamedTuple(1, 2), MyTypedNamedTuple(1, 'a')])
if dataclasses is not None:
self.check_coder(deterministic_coder, FrozenDataClass(1, 2))
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, UnFrozenDataClass(1, 2))
with self.assertRaises(TypeError):
self.check_coder(
deterministic_coder, FrozenDataClass(UnFrozenDataClass(1, 2), 3))
with self.assertRaises(TypeError):
self.check_coder(
deterministic_coder, MyNamedTuple(UnFrozenDataClass(1, 2), 3))
self.check_coder(deterministic_coder, list(MyEnum))
self.check_coder(deterministic_coder, list(MyIntEnum))
self.check_coder(deterministic_coder, list(MyIntFlag))
self.check_coder(deterministic_coder, list(MyFlag))
self.check_coder(
deterministic_coder,
[DefinesGetAndSetState(1), DefinesGetAndSetState((1, 2, 3))])
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, DefinesGetState(1))
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, DefinesGetAndSetState(dict()))
def test_dill_coder(self):
cell_value = (lambda x: lambda: x)(0).__closure__[0]
self.check_coder(coders.DillCoder(), 'a', 1, cell_value)
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), coders.DillCoder())),
(1, cell_value))
def test_fast_primitives_coder(self):
coder = coders.FastPrimitivesCoder(coders.SingletonCoder(len))
self.check_coder(coder, *self.test_values)
for v in self.test_values:
self.check_coder(coders.TupleCoder((coder, )), (v, ))
def test_fast_primitives_coder_large_int(self):
coder = coders.FastPrimitivesCoder()
self.check_coder(coder, 10**100)
def test_fake_deterministic_fast_primitives_coder(self):
coder = coders.FakeDeterministicFastPrimitivesCoder(coders.PickleCoder())
self.check_coder(coder, *self.test_values)
for v in self.test_values:
self.check_coder(coders.TupleCoder((coder, )), (v, ))
def test_bytes_coder(self):
self.check_coder(coders.BytesCoder(), b'a', b'\0', b'z' * 1000)
def test_bool_coder(self):
self.check_coder(coders.BooleanCoder(), True, False)
def test_varint_coder(self):
# Small ints.
self.check_coder(coders.VarIntCoder(), *range(-10, 10))
# Multi-byte encoding starts at 128
self.check_coder(coders.VarIntCoder(), *range(120, 140))
# Large values
MAX_64_BIT_INT = 0x7fffffffffffffff
self.check_coder(
coders.VarIntCoder(),
*[
int(math.pow(-1, k) * math.exp(k))
for k in range(0, int(math.log(MAX_64_BIT_INT)))
])
def test_float_coder(self):
self.check_coder(
coders.FloatCoder(), *[float(0.1 * x) for x in range(-100, 100)])
self.check_coder(
coders.FloatCoder(), *[float(2**(0.1 * x)) for x in range(-100, 100)])
self.check_coder(coders.FloatCoder(), float('-Inf'), float('Inf'))
self.check_coder(
coders.TupleCoder((coders.FloatCoder(), coders.FloatCoder())), (0, 1),
(-100, 100), (0.5, 0.25))
def test_singleton_coder(self):
a = 'anything'
b = 'something else'
self.check_coder(coders.SingletonCoder(a), a)
self.check_coder(coders.SingletonCoder(b), b)
self.check_coder(
coders.TupleCoder((coders.SingletonCoder(a), coders.SingletonCoder(b))),
(a, b))
def test_interval_window_coder(self):
self.check_coder(
coders.IntervalWindowCoder(),
*[
window.IntervalWindow(x, y) for x in [-2**52, 0, 2**52]
for y in range(-100, 100)
])
self.check_coder(
coders.TupleCoder((coders.IntervalWindowCoder(), )),
(window.IntervalWindow(0, 10), ))
def test_timestamp_coder(self):
self.check_coder(
coders.TimestampCoder(),
*[timestamp.Timestamp(micros=x) for x in (-1000, 0, 1000)])
self.check_coder(
coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567000),
timestamp.Timestamp(micros=1234567000))
self.check_coder(
coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567890123456000),
timestamp.Timestamp(micros=1234567890123456000))
self.check_coder(
coders.TupleCoder((coders.TimestampCoder(), coders.BytesCoder())),
(timestamp.Timestamp.of(27), b'abc'))
def test_timer_coder(self):
self.check_coder(
coders._TimerCoder(coders.StrUtf8Coder(), coders.GlobalWindowCoder()),
*[
userstate.Timer(
user_key="key",
dynamic_timer_tag="tag",
windows=(GlobalWindow(), ),
clear_bit=True,
fire_timestamp=None,
hold_timestamp=None,
paneinfo=None),
userstate.Timer(
user_key="key",
dynamic_timer_tag="tag",
windows=(GlobalWindow(), ),
clear_bit=False,
fire_timestamp=timestamp.Timestamp.of(123),
hold_timestamp=timestamp.Timestamp.of(456),
paneinfo=windowed_value.PANE_INFO_UNKNOWN)
])
def test_tuple_coder(self):
kv_coder = coders.TupleCoder((coders.VarIntCoder(), coders.BytesCoder()))
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:pair',
'is_pair_like': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.BytesCoder().as_cloud_object()
],
},
kv_coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'\x04abc', kv_coder.encode((4, b'abc')))
# Test unnested
self.check_coder(kv_coder, (1, b'a'), (-2, b'a' * 100), (300, b'abc\0' * 5))
# Test nested
self.check_coder(
coders.TupleCoder((
coders.TupleCoder((coders.PickleCoder(), coders.VarIntCoder())),
coders.StrUtf8Coder(),
coders.BooleanCoder())), ((1, 2), 'a', True),
((-2, 5), u'a\u0101' * 100, False), ((300, 1), 'abc\0' * 5, True))
def test_tuple_sequence_coder(self):
int_tuple_coder = coders.TupleSequenceCoder(coders.VarIntCoder())
self.check_coder(int_tuple_coder, (1, -1, 0), (), tuple(range(1000)))
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), int_tuple_coder)),
(1, (1, 2, 3)))
def test_base64_pickle_coder(self):
self.check_coder(coders.Base64PickleCoder(), 'a', 1, 1.5, (1, 2, 3))
def test_utf8_coder(self):
self.check_coder(coders.StrUtf8Coder(), 'a', u'ab\u00FF', u'\u0101\0')
def test_iterable_coder(self):
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:stream',
'is_stream_like': True,
'component_encodings': [coders.VarIntCoder().as_cloud_object()]
},
iterable_coder.as_cloud_object())
# Test unnested
self.check_coder(iterable_coder, [1], [-1, 0, 100])
# Test nested
self.check_coder(
coders.TupleCoder(
(coders.VarIntCoder(), coders.IterableCoder(coders.VarIntCoder()))),
(1, [1, 2, 3]))
def test_iterable_coder_unknown_length(self):
# Empty
self._test_iterable_coder_of_unknown_length(0)
# Single element
self._test_iterable_coder_of_unknown_length(1)
# Multiple elements
self._test_iterable_coder_of_unknown_length(100)
# Multiple elements with underlying stream buffer overflow.
self._test_iterable_coder_of_unknown_length(80000)
def _test_iterable_coder_of_unknown_length(self, count):
def iter_generator(count):
for i in range(count):
yield i
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
self.assertCountEqual(
list(iter_generator(count)),
iterable_coder.decode(iterable_coder.encode(iter_generator(count))))
def test_list_coder(self):
list_coder = coders.ListCoder(coders.VarIntCoder())
# Test unnested
self.check_coder(list_coder, [1], [-1, 0, 100])
# Test nested
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), list_coder)), (1, [1, 2, 3]))
def test_windowedvalue_coder_paneinfo(self):
coder = coders.WindowedValueCoder(
coders.VarIntCoder(), coders.GlobalWindowCoder())
test_paneinfo_values = [
windowed_value.PANE_INFO_UNKNOWN,
windowed_value.PaneInfo(
True, True, windowed_value.PaneInfoTiming.EARLY, 0, -1),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 0, 0),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 10, 0),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 0, 23),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 12, 23),
windowed_value.PaneInfo(
False, False, windowed_value.PaneInfoTiming.LATE, 0, 123),
]
test_values = [
windowed_value.WindowedValue(123, 234, (GlobalWindow(), ), p)
for p in test_paneinfo_values
]
# Test unnested.
self.check_coder(
coder,
windowed_value.WindowedValue(
123, 234, (GlobalWindow(), ), windowed_value.PANE_INFO_UNKNOWN))
for value in test_values:
self.check_coder(coder, value)
# Test nested.
for value1 in test_values:
for value2 in test_values:
self.check_coder(coders.TupleCoder((coder, coder)), (value1, value2))
def test_windowed_value_coder(self):
coder = coders.WindowedValueCoder(
coders.VarIntCoder(), coders.GlobalWindowCoder())
# Verify cloud object representation
self.assertEqual({
'@type': 'kind:windowed_value',
'is_wrapper': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.GlobalWindowCoder().as_cloud_object(),
],
},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(
b'\x7f\xdf;dZ\x1c\xac\t\x00\x00\x00\x01\x0f\x01',
coder.encode(window.GlobalWindows.windowed_value(1)))
# Test decoding large timestamp
self.assertEqual(
coder.decode(b'\x7f\xdf;dZ\x1c\xac\x08\x00\x00\x00\x01\x0f\x00'),
windowed_value.create(0, MIN_TIMESTAMP.micros, (GlobalWindow(), )))
# Test unnested
self.check_coder(
coders.WindowedValueCoder(coders.VarIntCoder()),
windowed_value.WindowedValue(3, -100, ()),
windowed_value.WindowedValue(-1, 100, (1, 2, 3)))
# Test Global Window
self.check_coder(
coders.WindowedValueCoder(
coders.VarIntCoder(), coders.GlobalWindowCoder()),
window.GlobalWindows.windowed_value(1))
# Test nested
self.check_coder(
coders.TupleCoder((
coders.WindowedValueCoder(coders.FloatCoder()),
coders.WindowedValueCoder(coders.StrUtf8Coder()))),
(
windowed_value.WindowedValue(1.5, 0, ()),
windowed_value.WindowedValue("abc", 10, ('window', ))))
def test_param_windowed_value_coder(self):
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils.windowed_value import PaneInfo
wv = windowed_value.create(
b'',
# Milliseconds to microseconds
1000 * 1000,
(IntervalWindow(11, 21), ),
PaneInfo(True, False, 1, 2, 3))
windowed_value_coder = coders.WindowedValueCoder(
coders.BytesCoder(), coders.IntervalWindowCoder())
payload | |
or of different materials connected by seams. Some or all of these materials can be
designed to fail when subjected to temperatures above a certain temperature range causing melting or some
other destructive process to occur to these materials. These failures can create access points from the
ceiling through the protective barrier to areas below being protected by the barrier, which can allow water
from a fire suppression system, typically located near the ceiling, to reach a fire located below the
protective barrier. '''
expected = ['Protective', 'barriers', 'ceilings', 'construction', 'roofs', 'protective barriers', 'material',
'seams', 'fail', 'temperatures', 'melting', 'destructive', 'access points', 'water', 'fire',
'fire suppression']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US08356946_20130122(self):
text = '''The interchangeable wheel bearing unit has a wheel hub and two tapered roller bearings, each having
an outer ring and an inner ring, between which a respective row of tapered rollers is situated,
and a securing ring in at least one of the inner rings of the tapered roller bearing. To assemble the wheel
bearing unit without special tools, each outer ring of the tapered roller bearings has a cylindrical
extension, which runs coaxially with the wheel hub axis towards the outer face of the bearing and into which
a respective seal is inserted, and a retaining element, which is supported on the corresponding inner ring
and axially fixes the outer ring, is located on the opposite face of the respective tapered roller bearing
from the seal. '''
expected = ['interchangeable', 'bearing', 'wheel', 'hub', 'tapered roller bearings', 'tapered', 'roller',
'outer ring', 'inner ring', 'ring', 'respective', 'assemble', 'tools', 'cylindrical extension',
'cylindrical', 'coaxially', 'retaining', 'retaining element', 'axially fixes']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US07349344_20080325(self):
text = '''An access network includes a test system controller that provides a test request in Signaling
Network Management Protocol (SNMP) messages to an element management system. A network gateway,
in conjunction with the element management system, provides test commands to a customer gateway over a Local
loop Emulation Service Embedded Operations Channel (LES-EOC). The customer gateway performs a subscriber loop
test on derived subscriber lines connected therewith. Results of the subscriber loop test are provided over
the LES-EOC to the gateway. The network gateway sends the results to the test system controller through the
element management system in SNMP messages. '''
expected = ['controller', 'test', 'Signaling', 'SNMP', 'Network', 'customer gateway', 'customer', 'Local loop',
'Emulation', 'LES-EOC', 'subscriber', 'gateway', 'network', 'Service']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US07476472_20090113(self):
text = '''The present invention provides a method for designing a mask. First, a main pattern including at
least a strip pattern is formed on the mask substrate. A shift feature is added to one end of the strip
pattern of the main pattern. Either the phase shift or the optical transmission or both of the shift feature
can be adjusted to optimize the resultant critical dimension between line-ends of the main pattern,
thus improving pullback of the line-ends of the strip pattern in the main pattern. '''
expected = ['mask', 'pattern', 'substrate', 'strip', 'strip feature', 'optical transmission', 'optical',
'transmission', 'optimize', 'critical', 'dimension', 'line-ends']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US09503700_20161122(self):
text = '''A phosphor wheel includes a rotating disk and a wavelength converting layer. The rotating disk has
a first surface and a second surface opposite to the first surface, in which the first surface forms a
coating region and a non-coating region. The wavelength converting layer is formed on the coating region of
the first surface for converting a light wavelength of a light beam. In addition, an embodiment of the
invention discloses a projection device having the phosphor wheel. When the rotating disk of the phosphor
wheel rotates, the recess portion may disturb the air around the phosphor wheel such that the temperature of
the wavelength converting layer may be effectively decreased. Simultaneously, the rotating disk has a stable
dynamic balance and the rotating disk has a larger heat dissipating region because the recess portion is
disposed on the rotating disk. '''
expected = ['phosphor', 'wheel', 'rotating disk', 'wavelength', 'rotating', 'coating', 'non-coating',
'converting', 'light beam', 'projection device', 'phosphor wheel', 'recess', 'temperature',
'dynamic balance', 'dynamic', 'heat dissipating', 'heat']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US07253825_20070807(self):
text = '''A method and an apparatus for driving an image display device, such as, a plasma display panel,
to represent a gradation. The pixels of a panel are classified into a plurality of groups, and one frame
period is divided with time into n subfields. An address period and a sustain period are sequentially
executed on each of the groups during at least two of the n subfields. While the pixels of one group are
undergoing an address period during a subfield, the pixels of the other groups remain idle. While the pixels
of one group are undergoing a sustain period, the pixels of groups that have already been addressed also
undergo the sustain period. During one subfield, different gradation weights are allocated to the groups. A
gradation of visual brightness for each pixel is determined by performing an address period for either all or
some of the groups during at least two subfields. The panel driving method for representing gradation is
adaptable. '''
expected = ['apparatus', 'driving', 'plasma display panel', 'plasma', 'gradation', 'pixels', 'subfields',
'sequentially', 'idle', 'sustain period', 'gradation weights', 'visual', 'adaptable']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US09435670_20160906(self):
text = '''A portable apparatus for sensing position of an oil port on a rotating element (e.g. the wheel or
track of a heavy vehicle) has a position sensor in electrical communication with a wireless transmitter. The
wireless transmitter is configured to receive signals from the position sensor and to send signals to a
wireless receiver. The apparatus further has a mounting structure (e.g. a magnet) for temporarily mounting
the apparatus on the rotating element. An indicium on the apparatus associated with a desired service action
to be performed on the rotating element correlates an angular position of the oil port to a pre-determined
reference position of the position sensor, the angular position of the oil port being a correct position for
performing the desired service action. In use, the apparatus is mounted on the rotating element so that the
indicium points at the oil port and then the rotating element is rotated until the receiver indicates that
the oil port is in the correct position. '''
expected = ['portable', 'apparatus', 'oil', 'rotating', 'wheel', 'vehicle', 'sensor', 'transmitter', 'wireless',
'signals', 'receiver', 'mounting', 'magnet', 'rotating element', 'indicium', 'correlates',
'angular', 'reference position', 'oil port']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US06959911_20051101(self):
text = '''A valve features a deflectable actuating element ( 1 ) that controls the movements of at least one
actual sealing element ( 3 ) for opening and/or closing at least one sealing contour ( 6 ), and the actuating
element is loaded via an elastic element ( 5 ) essentially perpendicularly relative to the direction of the
deflection; and the longitudinal axis of the actuating element and the force applied by the elastic element
are aligned in a position of the actuating element that is between its two extreme positions. The actuating
element ( 1 ) is preferably an unilaterally loaded piezoelectric bending transducer, and the sealing element
( 3 ) is preferably a toggle that | |
#put the columns two at a time in a dataframe
# dataframe and visualization tools
import pandas as pd
import numpy as np
import matplotlib as mlp
import time
from matplotlib import pyplot as plt
import wx
import os
import numpy.polynomial.polynomial as poly
import statistics as stats
from statistics import mode
from scipy.fft import *
import warnings
warnings.filterwarnings("ignore")
#style and formatting
pd.options.display.float_format = '{:.15f}'.format
mlp.style.use('tableau-colorblind10')
mlp.rcParams['figure.dpi']= 300
mlp.rcParams['font.family'] = 'Arial'
mlp.rcParams['figure.figsize'] = [14, 10]
mlp.rcParams['figure.facecolor'] = 'white'
mlp.rcParams['axes.edgecolor'] = 'grey'
mlp.rcParams['axes.spines.top'] = False
mlp.rcParams['axes.spines.right'] = False
mlp.rcParams['axes.xmargin'] = 0.15
mlp.rcParams['axes.ymargin'] = 0.15
class NoiseAnalysis():
def __init__(self):
self.samples_list=[]
self.noise_list=[]
self.LoD_list=[]
self.LoQ_list=[]
self.true = ['1', 't', 'tr', 'tru', 'true', 'truee', 'y', 'ye', 'yes', 'yess', 'yeah', 'yu', 'yup', 'yupp', 'sure', 'certainly', 'yay']
self.ChangeDefaults = 'False'
self.SetSampleSize = 'False'
self.SampleSize = 20000
self.SelectRange = 'False'
self.Start = -100000
self.End = 100000
self.DetectSignal ='True'
self.Threshold = 1.0
self.PolyFit = 'True'
self.Degree = 4
self.RemoveOutliers = 'False'
self.nStandardDeviations = 0.0
self.FourierApproximation = 'True'
self.nHarmonics = 10
self.RMS_noise_summary = pd.DataFrame()
#open a windows file explorer and select file path; save file path
def get_paths(self):
app = wx.App(None)
style = wx.FD_MULTIPLE
dialog = wx.FileDialog(None, 'Select File', wildcard='*.csv;*.arw', style=style)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
else:
paths = None
dialog.Destroy()
return paths
#read file and save data to a Dataframe
def read_files(self, paths):
df = pd.DataFrame()
for path in paths:
if path[-4:] == '.arw':
temp_df = pd.read_csv(path, delimiter="\t", header=None)
temp_df = temp_df[pd.to_numeric(temp_df[0], errors='coerce').notnull()].reset_index(drop=True)
df = pd.concat([df, temp_df], axis=1)
elif path[-4:] == '.csv':
temp_df = pd.read_csv(path)
df = pd.concat([df, temp_df], axis=1)
else:
pass
return df.astype('float')
#interactive dialog with user
def user_input(self):
print(f'The program\'s default settings are:')
print(f'''
SelectRange: {self.SelectRange},
DetectSignal: {self.DetectSignal}, Threshold: {self.Threshold},
PolyFit: {self.PolyFit}, Degree: {self.Degree}, RemoveOutliers: {self.RemoveOutliers}, nStandardDeviations: {self.nStandardDeviations},
FourierApproximation: {self.FourierApproximation}, nHarmonics: {self.nHarmonics}''')
print('')
self.ChangeDefaults = input('Would you like to make any changes? ')
if self.ChangeDefaults.lower() in self.true:
self.SelectRange = input(f'Would you like to enter a specific range? ') or self.SelectRange
if self.SelectRange.lower() in self.true:
self.Start = input(f'Start: ') or self.Start
self.End = input(f'End: ') or self.End
self.DetectSignal = input(f'Detect signals? ') or self.DetectSignal
if self.DetectSignal.lower() in self.true:
self.Threshold = input(f'Signal detection threshold: ') or self.Threshold
self.PolyFit = input(f'Polynomial fit? ') or self.PolyFit
if self.PolyFit.lower() in self.true:
self.Degree = input(f'Polynomial fit degree: ') or self.Degree
self.RemoveOutliers = input(f'Remove Outliers? ') or self.RemoveOutliers
if self.RemoveOutliers.lower() in self.true:
self.nStandardDeviations = input(f'Number of standard deviation: ') or self.nStandardDeviations
self.FourierApproximation = input(f'Fourier approximation? ') or self.FourierApproximation
if self.FourierApproximation.lower() in self.true:
self.nHarmonics = input(f'Number of harmonics to use: ') or self.nHarmonics
print('')
print(f'Your settings are:')
print(f'''
SelectRange: {self.SelectRange},
DetectSignal: {self.DetectSignal}, Threshold: {self.Threshold},
PolyFit: {self.PolyFit}, Degree: {self.Degree}, RemoveOutliers: {self.RemoveOutliers}, nStandardDeviations: {self.nStandardDeviations},
FourierApproximation: {self.FourierApproximation}, nHarmonics: {self.nHarmonics}''')
print('')
return None
#option to control sample size
def set_sample_size(self, x, y, sample_size):
x_new = np.linspace(min(x), max(x), sample_size)
# Where you want to interpolate
y_new = np.interp(x_new, x, y)
return x_new, y_new
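# Illustrative example (assumed values): resampling x=[0,1,2], y=[0,2,4] with
# sample_size=5 yields x_new=[0, 0.5, 1, 1.5, 2] and, by linear interpolation,
# y_new=[0, 1, 2, 3, 4].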
#option to select a specific range to operate on
def select_range(self, x, y, Start, End):
keep = np.zeros(len(x))
for i in range(len(x)):
if x[i] > Start and x[i] < End:
keep[i] = 1
return x[keep==1], y[keep==1]
#classify each data point as either baseline (0) or signal (1)
def signal_baseline_classifier(self, y, signal_threshold, lag_fraction=0.03, draw_baseline=True):
#use an SMA (simple moving average) as a lagging baseline to determine signal
lag = int(len(y)*lag_fraction)
len_data = len(y)
threshold = signal_threshold*y.std() #min(y.std()/10 , y[:lag].std())
signal = np.zeros(len_data)
for i in range(lag, len_data):
SMA_i = np.sum(y[i-lag:i+lag])/len(y[i-lag:i+lag])
if abs(y[i]-SMA_i) >= threshold:
signal[i] = 1
#correct any false-negative points by conforming to the nearest n neighbors
n_neighbors = max(1, int(lag/5))
s = signal.copy()
for i in range(n_neighbors, len_data):
if s[i] == 0 and mode(s[i-n_neighbors:i+n_neighbors]) == 1:
signal[i-n_neighbors:i+n_neighbors] = mode(s[i-n_neighbors:i+n_neighbors])
#characterize baseline points around signals as signals to reduce false negatives
s = signal.copy()
for i in range(n_neighbors,len_data):
if s[i] == 1:
signal[i-3*n_neighbors:i+3*n_neighbors] = 1
#recreate baseline as a copy of y without signals
if draw_baseline:
baseline = pd.Series(y.copy())
baseline[signal==1] = np.nan
baseline = baseline.interpolate()
for i in range(len_data):
baseline[i] = min(y[i], baseline[i])
else:
baseline = 'N/a'
return signal
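# Minimal usage sketch (hypothetical data, default threshold): given a
# NoiseAnalysis instance na,
#   y = np.concatenate([np.random.normal(0, 1, 500), np.random.normal(50, 1, 50), np.random.normal(0, 1, 500)])
#   signal = na.signal_baseline_classifier(y, signal_threshold=1.0)
# points in the middle burst deviate from the moving average by more than
# threshold*std and are flagged as 1; the rest remain baseline (0).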
#create a tunnel-like polynomial fit of the baseline; this can be used to flatten and/or remove outliers
def polynomial_tunnel_fit(self, x, y, deg, n_std, n_iters=1, remove_outliers=True, flatten=True):
#Runge's phenomenon is a known issue with this method
i = 0
outlier = np.zeros(len(y))
while i < n_iters:
coefs = poly.polyfit(x, y, deg)
ffit = poly.polyval(x, coefs)
top = ffit + n_std*y.std()
base = ffit - n_std*y.std()
if remove_outliers:
toutlier = y > top
boutlier = y < base
outlier = toutlier | boutlier
x = x[~outlier]
y = y[~outlier]
top = top[~outlier]
base = base[~outlier]
if flatten:
y = y-base
y = y-y.mean()
if i == 0:
int_top = top
int_base = base
i += 1
return x, y, int_top, int_base
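# Usage sketch (hypothetical values): fit a degree-4 polynomial "tunnel" around
# the baseline, drop points further than 3 standard deviations from the fit,
# and flatten what remains by subtracting the lower band:
#   x_flat, y_flat, top, base = na.polynomial_tunnel_fit(x, y, deg=4, n_std=3.0)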
def fourier_transformation_approximation(self, y, nHarmonics):
# Number of samples in our input signal
N = len(y)
#This function computes a Fourier series representation of our input signal;
#using the 1-D discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
fourier_series = rfft(y)
#reconstruct the signal from the inverse of the real-valued components of the Fourier series
#only use the first 'n' periodic components from the Fourier series to reconstruct the signal
y_approx = irfft(fourier_series[:nHarmonics], N)
return y_approx
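# Usage sketch (hypothetical): approximate slow baseline drift with the first
# 10 harmonics and subtract it, mirroring how y_approx is used further below:
#   y_drift = na.fourier_transformation_approximation(y_flat, nHarmonics=10)
#   y_detrended = y_flat - y_drift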
#produce short report
def short_report_grapher(self, x2, y2, LoB, LoD, LoQ):
#plot cleaned baselines + LOD/LOQ thresholds
fig, ax = plt.subplots(figsize=(12, 6))
plt.gca().ticklabel_format(axis='both', style='plain', useOffset=False)
plt.suptitle(f'{df.columns[1]}', fontsize=12, y = 0.94, fontweight='bold')
ax.scatter(x2, y2, s=0.5)
LoB_=ax.hlines(LoB, xmin=min(x2), xmax=max(x2), linestyle= '-', alpha=0.4, linewidth=0.6)
LoD_=ax.hlines(LoB+LoD, xmin=min(x2), xmax=max(x2), linestyle= ':', alpha=0.9, linewidth=1.2)
LoQ_=ax.hlines(LoB+LoQ, xmin=min(x2), xmax=max(x2), linestyle= '-', alpha=0.9, linewidth=1.2)
ax.set_xlabel(f'{df.columns[0]}', fontsize=11)
ax.set_ylabel('signal', fontsize=11)
ax.legend([LoQ_, LoD_], ['LoQ', 'LoD'], frameon=False, bbox_to_anchor=(1.05, 1), loc='upper right', handlelength=0.5)
return fig.savefig(f'{os.path.dirname(path)}\\{fig._suptitle.get_text()}_rms_noise.png', facecolor=fig.get_facecolor(), dpi=fig.dpi)
na = NoiseAnalysis()
paths = na.get_paths()
input_data = na.read_files(paths=paths)
na.user_input()
print('')
print(f'working...')
numcols = len(input_data.columns)
for i in range(numcols):
#i = 0,2,4,6,8...etc.
if i%2 == 0:
#generate temporary dataframe and define temp x,y
df = pd.DataFrame(input_data.iloc[:,i:i+2]).dropna()
N = int(len(df)*0.015)
x = df.iloc[N:-N,0].values
y = df.iloc[N:-N,1].values
if na.SetSampleSize.lower() in na.true:
x, y = na.set_sample_size(x, y, sample_size= na.SampleSize)
x2 = x.copy()
y2 = y.copy()
signal=np.zeros(len(x2))
fig, axs = plt.subplots(2, 2)
plt.gca().ticklabel_format(axis='both', style='plain', useOffset=False)
plt.suptitle(f'{df.columns[1]}', fontsize=12, y = 0.94, fontweight='bold')
if na.SelectRange.lower() in na.true:
x, y = na.select_range(x2, y2, int(na.Start), int(na.End))
x2 = x.copy()
y2 = y.copy()
signal=np.zeros(len(x2))
if na.DetectSignal.lower() in na.true:
signal = na.signal_baseline_classifier(y=y, signal_threshold= float(na.Threshold), lag_fraction=0.03, draw_baseline=True)
b=axs[0, 0].scatter(x[signal==0], y[signal==0], s=0.2)
s=axs[0, 0].scatter(x[signal==1], y[signal==1], s=0.2)
axs[0, 0].set_title(f'Signal Detection (threshold={na.Threshold})', fontsize=10)
axs[0, 0].set_xlabel(f'{df.columns[0]}', fontsize=9)
axs[0, 0].set_ylabel(f'signal', fontsize=9)
axs[0, 0].legend([b, s],[ 'Baseline', 'Signal'], fontsize=8, frameon=False, bbox_to_anchor=(1.05, 1), loc='upper right', handlelength=1)
x2 = x2[signal==0]
y2 = y2[signal==0]
if na.PolyFit.lower() in na.true:
x2, y2, topline, bottomline = na.polynomial_tunnel_fit(x2, y2, deg=int(na.Degree), n_std=float(na.nStandardDeviations), n_iters=1,
remove_outliers= na.RemoveOutliers.lower() in na.true, flatten=True)
b=axs[0, 1].scatter(x[signal==0], y[signal==0], s=0.2)
t=axs[0, 1].scatter(x2, topline, s=0.2, color='#ff7f0e', alpha=0.4, linewidth=0.6)
b2=axs[0, 1].scatter(x2, bottomline, s=0.2, color='#ff7f0e', alpha=0.4, linewidth=0.6)
axs[0, 1].set_title(f'Polynomial Fit (degree={na.Degree})', fontsize=10)
axs[0, 1].set_xlabel(f'{df.columns[0]}', fontsize=9)
axs[0, 1].set_ylabel(f'signal', fontsize=9)
axs[0, 1].legend([b, t],['Baseline', 'Polynomial Fit'], fontsize=8, frameon=False, bbox_to_anchor=(1.05, 1), loc='upper right', handlelength=1)
if na.FourierApproximation.lower() in na.true:
y_approx = na.fourier_transformation_approximation(y=y2, nHarmonics=int(na.nHarmonics))
b=axs[1, 0].scatter(x2, y2, s=0.2)
a=axs[1, 0].scatter(x2, y_approx, s=0.2, color='#ff7f0e', alpha=0.4, linewidth=0.4)
axs[1, 0].set_title(f'Fourier Approximation Using The First {na.nHarmonics} Harmonics', fontsize=10)
axs[1, 0].set_xlabel(f'{df.columns[0]}', fontsize=9)
axs[1, 0].set_ylabel(f'signal', fontsize=9)
axs[1, 0].legend([b, a],[ 'Baseline', 'Fourier Approximation'], fontsize=8, frameon=False, bbox_to_anchor=(1.05, 1), loc='upper right', handlelength=1)
y2=y2-y_approx
#calculate LOD/LOQ
y2 = y2 - y2.mean()
noise = y2.std()
LoD = 3*noise
LoQ = 10*noise
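# LoD/LoQ above follow the commonly used 3*sigma / 10*sigma convention applied
# to the standard deviation of the residual baseline noise.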
#graph 4th quadrant with final baseline and LoD/LoQ horizontal lines
axs[1, 1].scatter(x2, y2, s=0.2)
axs[1, 1].hlines(0, xmin=min(x2), xmax=max(x2), linestyle= '-', color='#000000', alpha=0.4, linewidth=0.6)
LoD_line=axs[1, 1].hlines(LoD, xmin=min(x2), xmax=max(x2), linestyle= ':', color='#000000', alpha=0.8, linewidth=1.0)
LoQ_line=axs[1, 1].hlines(LoQ, xmin=min(x2), xmax=max(x2), linestyle= '-', color='#000000', alpha=0.8, linewidth=1.0)
axs[1, 1].set_title('Baseline Noise Evaluation', fontsize=10)
axs[1, 1].set_xlabel(f'{df.columns[0]}', fontsize=9)
axs[1, 1].set_ylabel('signal', fontsize=9)
axs[1, 1].legend([LoQ_line, LoD_line], ['LoQ', 'LoD'], fontsize=8, frameon=False, bbox_to_anchor=(1.05, 1), loc='upper right', handlelength=0.5)
fig.savefig(f'{os.path.dirname(paths[0])}\\_{fig._suptitle.get_text()}_rms_noise.png', facecolor=fig.get_facecolor(), dpi=fig.dpi)
#collect | |
<filename>autotest/gcore/basic_test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test basic GDAL open
# Author: <NAME> <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2008-2013, <NAME> <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import gdal
# Nothing exciting here. Just trying to open non existing files,
# or empty names, or files that are not valid datasets...
def matches_non_existing_error_msg(msg):
m1 = "does not exist in the file system,\nand is not recognized as a supported dataset name.\n" in msg
m2 = msg == 'No such file or directory'
return m1 or m2
def basic_test_1():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('non_existing_ds', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_2():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('non_existing_ds', gdal.GA_Update)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_3():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_4():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('', gdal.GA_Update)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_5():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('data/doctype.xml', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '`data/doctype.xml\' not recognized as a supported file format.\n':
return 'success'
else:
return 'fail'
###############################################################################
# Issue several AllRegister() to check that GDAL drivers are good citizens
def basic_test_6():
gdal.AllRegister()
gdal.AllRegister()
gdal.AllRegister()
return 'success'
###############################################################################
# Test fix for #3077 (check that errors are cleared when using UseExceptions())
def basic_test_7_internal():
try:
gdal.Open('non_existing_ds', gdal.GA_ReadOnly)
gdaltest.post_reason('opening should have thrown an exception')
return 'fail'
except:
# Special case: we should still be able to get the error message
# until we call a new GDAL function
if not matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
if gdal.GetLastErrorType() == 0:
gdaltest.post_reason('did not get expected error type')
return 'fail'
# Should issue an implicit CPLErrorReset()
gdal.GetCacheMax()
if gdal.GetLastErrorType() != 0:
gdaltest.post_reason('got unexpected error type')
return 'fail'
return 'success'
def basic_test_7():
old_use_exceptions_status = gdal.GetUseExceptions()
gdal.UseExceptions()
ret = basic_test_7_internal()
if old_use_exceptions_status == 0:
gdal.DontUseExceptions()
return ret
###############################################################################
# Test gdal.VersionInfo('RELEASE_DATE') and gdal.VersionInfo('LICENSE')
def basic_test_8():
ret = gdal.VersionInfo('RELEASE_DATE')
if len(ret) != 8:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
python_exe = sys.executable
if sys.platform == 'win32':
python_exe = python_exe.replace('\\', '/')
ret = gdaltest.runexternal(python_exe + ' basic_test.py LICENSE 0')
if ret.find('GDAL/OGR is released under the MIT/X license') != 0 and ret.find('GDAL/OGR Licensing') < 0:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
f = open('tmp/LICENSE.TXT', 'wt')
f.write('fake_license')
f.close()
ret = gdaltest.runexternal(python_exe + ' basic_test.py LICENSE 1')
os.unlink('tmp/LICENSE.TXT')
if ret.find('fake_license') != 0 and ret.find('GDAL/OGR Licensing') < 0:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test gdal.PushErrorHandler() with a Python error handler
def my_python_error_handler(eErrClass, err_no, msg):
gdaltest.eErrClass = eErrClass
gdaltest.err_no = err_no
gdaltest.msg = msg
def basic_test_9():
gdaltest.eErrClass = 0
gdaltest.err_no = 0
gdaltest.msg = ''
gdal.PushErrorHandler(my_python_error_handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
if gdaltest.eErrClass != 1:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.err_no != 2:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.msg != 'test':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test gdal.PushErrorHandler() with a Python error handler as a method (#5186)
class my_python_error_handler_class:
def __init__(self):
self.eErrClass = None
self.err_no = None
self.msg = None
def handler(self, eErrClass, err_no, msg):
self.eErrClass = eErrClass
self.err_no = err_no
self.msg = msg
def basic_test_10():
# Check that reference counting works OK
gdal.PushErrorHandler(my_python_error_handler_class().handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
error_handler = my_python_error_handler_class()
gdal.PushErrorHandler(error_handler.handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
if error_handler.eErrClass != 1:
gdaltest.post_reason('fail')
return 'fail'
if error_handler.err_no != 2:
gdaltest.post_reason('fail')
return 'fail'
if error_handler.msg != 'test':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test gdal.OpenEx()
def basic_test_11():
ds = gdal.OpenEx('data/byte.tif')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_VECTOR)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER | gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_ALL)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_UPDATE)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER | gdal.OF_VECTOR | gdal.OF_UPDATE | gdal.OF_VERBOSE_ERROR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = [] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = ['GTiff'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = ['PNG'] )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
with gdaltest.error_handler():
ds = gdal.OpenEx('data/byte.tif', open_options = ['FOO'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ar_ds = [ gdal.OpenEx('data/byte.tif', gdal.OF_SHARED) for i in range(1024) ]
if ar_ds[1023] is None:
gdaltest.post_reason('fail')
return 'fail'
ar_ds = None
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_RASTER)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayer(0) is None:
gdaltest.post_reason('fail')
return 'fail'
ds.GetLayer(0).GetMetadata()
ds = gdal.OpenEx('../ogr/data/poly.shp', allowed_drivers = ['ESRI Shapefile'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_RASTER | gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('non existing')
if ds is not None or gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.OpenEx('non existing', gdal.OF_VERBOSE_ERROR)
gdal.PopErrorHandler()
if ds is not None or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
old_use_exceptions_status = gdal.GetUseExceptions()
gdal.UseExceptions()
got_exception = False
try:
ds = gdal.OpenEx('non existing')
except:
got_exception = True
if old_use_exceptions_status == 0:
gdal.DontUseExceptions()
if not got_exception:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test GDAL layer API
def basic_test_12():
ds = gdal.GetDriverByName('MEMORY').Create('bar', 0, 0, 0)
if ds.GetDescription() != 'bar':
gdaltest.post_reason('failure')
print(ds.GetDescription())
return 'fail'
lyr = ds.CreateLayer("foo")
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetDescription() != 'foo':
gdaltest.post_reason('failure')
print(lyr.GetDescription())
return 'fail'
from osgeo import ogr
if lyr.TestCapability(ogr.OLCCreateField) != 1:
gdaltest.post_reason('failure')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayerByName("foo")
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayerByIndex(0)
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
sql_lyr = ds.ExecuteSQL('SELECT * FROM foo')
if sql_lyr is None:
gdaltest.post_reason('failure')
return 'fail'
ds.ReleaseResultSet(sql_lyr)
new_lyr = ds.CopyLayer(lyr, 'bar')
if new_lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if ds.DeleteLayer(0) != 0:
gdaltest.post_reason('failure')
return 'fail'
if ds.DeleteLayer('bar') != 0:
gdaltest.post_reason('failure')
return 'fail'
ds.SetStyleTable(ds.GetStyleTable())
ds = None
return 'success'
###############################################################################
# Test correct sorting of StringList / metadata (#5540, #5557)
def basic_test_13():
ds = gdal.GetDriverByName('MEM').Create('',1,1)
for i in range(3):
if i == 0:
ds.SetMetadataItem("ScaleBounds","True")
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
elif i == 1:
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds","True")
else:
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds","True")
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
if ds.GetMetadataItem('scalebounds') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('SCALEBOUNDS') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds.MinScale') != '0':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds.MaxScale') != '2000000':
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.GetDriverByName('MEM').Create('',1,1)
for i in range(200):
ds.SetMetadataItem("FILENAME_%d" % i, "%d" % i)
for i in range(200):
if ds.GetMetadataItem("FILENAME_%d" % i) != '%d' % i:
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
#
"""
raise NotImplementedError
def Save(self):
"""Saves the changes made to the record.
This performs an update to the record, except when `create_new` is set to
True, in which case the record is inserted.
Arguments:
% create_new: bool ~~ False
Tells the method to create a new record instead of updating a current.
This should be used when Save is called by the Create() method.
"""
raise NotImplementedError
@classmethod
def _LoadAsForeign(cls, connection, relation_value, method=None):
"""Loads a record as a foreign relation of another.
Defaults to using the _LOAD_METHOD defined on the class, but when
provided the optional `method` argument, this named method is used instead.
"""
if method is None:
method = cls._LOAD_METHOD
return getattr(cls, method)(connection, relation_value)
# ############################################################################
# Functions for tracking table and primary key values
#
def _Changes(self):
"""Returns the differences of the current state vs the last stored state."""
sql_record = self._DataRecord()
changes = {}
for key, value in sql_record.items():
if self._record.get(key) != value:
changes[key] = value
return changes
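# Sketch of the intended behaviour (added for illustration, not from the
# original source): if the last stored state was {'message': 'old'} and the
# record now holds {'message': 'new'}, _Changes() returns {'message': 'new'}.
# Unchanged fields are left out, so a concrete Save() can presumably issue a
# minimal UPDATE touching only the modified columns.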
def _DataRecord(self):
"""Returns a dictionary of the record's database values
For any Record object present, its primary key value (`Record.key`) is used.
"""
sql_record = {}
for key, value in super().items():
sql_record[key] = self._ValueOrPrimary(value)
return sql_record
@staticmethod
def _ValueOrPrimary(value):
"""Returns the value, or its primary key value if it's a Record."""
while isinstance(value, BaseRecord):
if hasattr(value, '_RECORD_KEY') and value._RECORD_KEY:
value = value[value._RECORD_KEY]
else:
value = value.key
return value
@classmethod
def TableName(cls):
"""Returns the database table name for the Record class.
If this is not explicitly defined by the class constant `_TABLE`, the return
value will be the class name with the first letter lowercased.
"""
if cls._TABLE:
return cls._TABLE
name = cls.__name__
return name[0].lower() + name[1:]
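# Illustrative sketch (the class name below is hypothetical): a subclass
# declared as `class MessageThread(BaseRecord)` without a _TABLE constant
# reports TableName() == 'messageThread'; defining _TABLE = 'threads' on the
# class overrides that default.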
# Pylint falsely believes this property is overwritten by its setter later on.
# pylint: disable=E0202
@property
def key(self):
"""Returns the primary key for the object.
This is used for the Save/Update methods, where foreign relations should be
stored by their primary key.
"""
if isinstance(self._PRIMARY_KEY, tuple):
record = self._DataRecord()
return tuple(record[key] for key in self._PRIMARY_KEY)
return self.get(self._PRIMARY_KEY)
# pylint: enable=E0202
# Pylint doesn't understand property setters at all.
# pylint: disable=E0102, E0202, E1101
@key.setter
def key(self, value):
"""Sets the value of the primary key."""
if isinstance(value, tuple):
if len(value) != len(self._PRIMARY_KEY):
raise ValueError('Not enough values for compound key.')
for key, key_val in zip(self._PRIMARY_KEY, value):
self[key] = key_val
else:
self[self._PRIMARY_KEY] = value
# pylint: enable=E0102, E0202, E1101
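# Illustrative sketch (field names are hypothetical): for a class with
# _PRIMARY_KEY = ('author', 'title'), assigning `record.key = (4, 'Dune')`
# writes both columns, while a single-column key accepts a plain scalar:
# `record.key = 4`.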
Error = Error
AlreadyExistError = AlreadyExistError
NotExistError = NotExistError
PermissionError = PermissionError
class Record(BaseRecord):
"""Extensions to the Record abstraction for relational database use."""
_FOREIGN_RELATIONS = {}
_CONNECTOR = 'mysql'
SEARCHABLE_COLUMNS = []
# ############################################################################
# Methods enabling auto-loading
#
def GetRaw(self, field):
"""Returns the value of the field, suppressing auto-loading."""
return super(Record, self).__getitem__(field)
def __getitem__(self, field):
"""Returns the value corresponding to a given `field`.
If a field represents a foreign relation, this will be delegated to
the `_LoadForeign` method.
"""
value = super(Record, self).__getitem__(field)
return self._LoadForeign(field, value)
def _LoadForeign(self, field, value):
"""Loads and returns objects referenced by foreign key.
This is done by checking the `field` against the class' `_FOREIGN_RELATIONS`
mapping. If a match is found, `_LoadForeignFromRelationsTable` is executed
and its return value returned.
If the `field` is not present in the class mapping, it will be checked
against table names for each of the subclasses of Record. This mapping is
maintained in `_SUBTYPES`. If a match is found, an instance of the
corresponding class will replace the existing value, and will subsequently
be returned.
If the `field` is not present in either mapping, its value will remain
unchanged, and returned as such.
N.B. If the field name is the same as the record's `TableName`, it will NOT be
automatically resolved. The assumption is that the field will not contain a
meaningful reference. This behavior can be altered by specifying the
relation in the _FOREIGN_RELATIONS class constant.
Arguments:
@ field: str
The field name to be checked for foreign references
@ value: obj
The current value for the field. This is used as lookup index in case
of foreign references.
Returns:
obj: The value belonging to the given `field`. In case of resolved foreign
references, this will be the referenced object. Else it's unchanged.
"""
if value is None:
return None
elif not isinstance(value, BaseRecord):
if field in self._FOREIGN_RELATIONS:
value = self._LoadUsingForeignRelations(
self._FOREIGN_RELATIONS[field], field, value)
elif field == self.TableName():
return value
elif field in self._SUBTYPES:
value = self._SUBTYPES[field]._LoadAsForeign(self.connection, value)
self[field] = value
return value
def _LoadUsingForeignRelations(self, foreign_cls, field, value):
"""Loads and returns foreign relation based on given class (name).
The action taken depends on the given `cls`. If the given class is None (or
otherwise boolean false), no action will be taken, and the value will be
returned unchanged.
If the class is given as string, it will be loaded from the current module.
It should be a proper subclass of Record, after which the current `value` is
used to create a record using `cls._LoadAsForeign`.
Arguments:
@ foreign_cls: Record / str / dict
The class name or actual type to create an instance from. Could also
be a dictionary with `class` and `loader` keys that indicate class and
method to use for loading foreign relations.
@ field: str
The field name to be checked for foreign references
@ value: obj
The current value for the field. This is used as lookup index in case
of foreign references.
Raises:
ValueError: If the class name cannot be found, or the type is not a
subclass of Record.
Returns:
obj: The value belonging to the given `field`. In case of resolved foreign
references, this will be the referenced object. Else it's unchanged.
"""
def GetRecordClass(cls):
"""Returns the record class or loads it from its string name"""
if isinstance(cls, str):
try:
cls = getattr(sys.modules[self.__module__], cls)
except AttributeError:
raise ValueError(
'Bad _FOREIGN_RELATIONS map: Target %r not a class in %r' % (
cls, self.__module__))
if not issubclass(cls, Record):
raise ValueError('Bad _FOREIGN_RELATIONS map: '
'Target %r not a subclass of Record' % cls.__name__)
return cls
if foreign_cls is None:
return value
if type(foreign_cls) is dict:
cls = GetRecordClass(foreign_cls['class'])
loader = foreign_cls.get('loader')
return cls._LoadAsForeign(self.connection, value, method=loader)
return GetRecordClass(foreign_cls)._LoadAsForeign(self.connection, value)
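# A minimal sketch of the mapping forms described above; the `Author` class
# and its `FromName` loader are hypothetical examples, not part of this module:
#
#   class Message(Record):
#     _FOREIGN_RELATIONS = {
#         'author': 'Author',                                   # class given by name
#         'editor': {'class': 'Author', 'loader': 'FromName'},  # custom loader method
#         'checksum': None,                                      # never auto-load
#     }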
# ############################################################################
# Override basic dict methods so that autoload mechanisms function on them.
#
def get(self, key, default=None):
"""Returns the value for `key` if its present, otherwise `default`."""
try:
return self[key]
except KeyError:
return default
def pop(self, field, *default):
"""Pops the value corresponding to the field from the Record.
If the field does not exist, a KeyError is raised, unless an optional default
value was supplied, in which case that default is returned instead.
"""
try:
value = self[field]
except KeyError:
if not default:
raise
return default[0]
del self[field]
return value
def iteritems(self):
"""Yields all field+value pairs in the Record.
N.B. This automatically resolves foreign references.
"""
return ((key, self[key]) for key in self)
def itervalues(self):
"""Yields all values in the Record, loading foreign references."""
return (self[key] for key in self)
def items(self):
"""Returns a list of field+value pairs in the Record.
N.B. This automatically resolves foreign references.
"""
return list(self.iteritems())
def values(self):
"""Returns a list of values in the Record, loading foreign references."""
return list(self.itervalues())
# ############################################################################
# Private methods to be used for development
#
@classmethod
def _FromParent(cls, parent, relation_field=None, conditions=None,
limit=None, offset=None, order=None,
yield_unlimited_total_first=False):
"""Returns all `cls` objects that are a child of the given parent.
This utilizes the parent's _Children method, with either this class'
TableName or the filled out `relation_field`.
Arguments:
@ parent: Record
The parent for who children should be found in this class
% relation_field: str ~~ cls.TableName()
The fieldname in this class' table which relates to the parent's primary
key. If not given, parent.TableName() will be used.
% conditions: str / iterable ~~ None
The extra condition(s) that should be applied when querying for records.
% limit: int ~~ None
Specifies a maximum number of records to return.
# oks.py
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
import sys
import os.path
import datetime
import re
import locale
import gtk
import pango
import gobject
from core.output.handlers.string import StringOutputHandler
from core.output.handlers.view import ViewOutputHandler
import oks
from oks.db.manager import DatabaseManager
from oks.db.app import (TABLE_COMPANIES, TABLE_INVENTORY, TABLE_OPERATIONS,
TABLE_TRANSACTIONS_VIEW, TABLE_TO_TYPE)
from oks.elements.company import Company
from oks.elements.item import Item
from oks.elements.operation import Operation
from oks.elements.operation.product import Product
from oks.elements.operation.pitem import ProductionItem
from oks.elements.operation.pitem.rawmaterial import RawMaterial
from oks.elements.operation.eitem import ExchangeItem
from oks.elements.operation.transaction import Transaction
from oks.reports import companies, inventory, operations, transactions
from oks.gui.window import Window
from oks.gui.dialogs.company import DialogCompany
from oks.gui.dialogs.operation import DialogOperation
from oks.gui.dialogs.item import DialogItem
from oks.gui.dialogs.operation_type import DialogSelectOperationType
from oks.gui.entrydate import EntryDate
from oks.gui.fields import *
from oks.gui.columns import *
from oks.gui.title import Title
from oks.gui.printaction import PrintAction
from oks.gui.searchbox import SearchBox
def clear_gtk_container(container):
for child in container.get_children():
container.remove(child)
class DialogTable(Window):
AUTO_UPDATE_SEARCH = True
SORT = True
def __init__(self, gladeFile, db_path):
# Initiating the dialog
builder = gtk.Builder()
builder.add_from_file(gladeFile)
Window.__init__(self, builder, "main")
self.set_size(1300, 800) # Makes the hpaned be placed appropriately
self.window.maximize()
# Loading some required widgets
self.load_widget("hpaned")
self.load_widget("treeview")
self.load_widget("vbox_main")
self.load_widget("toolbar")
self.load_widget("toolbar_separator")
self.load_widget("toolbutton_add")
self.load_widget("toolbutton_switch_mode")
self.load_widget("vbox_right")
self.load_widget("vbox_left_main")
self.load_widget("action_view_reports")
self.load_widget("action_view_main")
self.load_widget("vbox_left_reports")
self.load_widget("action_new")
self.load_widget("dialogAbout")
self.load_widget("combobox_report_type")
self.load_widget("label_report_description")
self.load_widget("table_report_options")
self.load_widget("vbox_report_options_frame")
self.load_widget("statusbar")
self.load_widget("label_report_options")
self.current_left = None
# Setting the view area
self.title = Title(self.window, "Açõe_s")
self.title.enable_actions(False)
self.vbox_right.pack_start(self.title, False, False)
self.vbox_right.reorder_child(self.title, 0)
self.hpaned.pack2(self.vbox_right, True, False)
# Report selection area
attributes = pango.AttrList()
attributes.insert(pango.AttrStyle(pango.STYLE_ITALIC, 0, -1))
self.label_report_description.set_attributes(attributes)
self.combobox_report_type = ComboBoxField(self.combobox_report_type,
None,
gobject.TYPE_STRING,
gobject.TYPE_PYOBJECT)
self.combobox_report_type.connect("new-value",
self.on_report_type_selected)
# Setting the view area
self.textview = TextViewField(self.get_widget("textview"))
self.textview.widget.set_editable(False)
self.textview.set_font("monospace")
# Search
self.search_timeout = 0
self.search_box = SearchBox(self.AUTO_UPDATE_SEARCH)
self.search_box.connect("search-updated", self.auto_update_search)
self.get_widget("vbox_search").pack_start(self.search_box, False)
# statusbar
self.statusbar.get_context_id("oks")
# Tables and Reports
self.tables = {}
self.reports = {}
# Defining the default tables. Each table has a set of properties
# that are keys in a dictionary:
#
# window_title: the window title
# columns: the TreeViewColumns used by the table
# default_sort: the default sort column and order
# search_widgets: the fields that will be put in the SearchBox
# actions: the (label, callback) pairs offered for the selected row
# new_stock: the label for the "new element" action (None disables it)
# completion: the completion sources for the search_widgets
# reports: the report classes available for this table
# main_state / report_state: the last selection, scroll and report choice
#
# Companies
#
entryName = EntryField(gtk.Entry(), "name")
entryCity = EntryField(gtk.Entry(), "city")
self.tables[TABLE_COMPANIES] = {
"window_title": "Empresas",
"columns": [
TextColumn("Nome", 1, self.SORT, True),
TypeColumn("Tipo", 2, oks.COMPANY_TYPES_DESC, self.SORT),
TextColumn("Telefone", 9, self.SORT, True)],
"default_sort": (1, gtk.SORT_ASCENDING),
"search_widgets": [
("Nome: ", entryName),
("Cidade: ", entryCity)],
"actions": [
("_Editar", self.edit),
("_Remover", self.remove),
("_Imprimir", self.print_action),
("Imprimir e_tiqueta de correio", self.print_label)],
"new_stock": "Nova empresa",
"completion": [(entryCity, "companies:city")],
"reports": [companies.FullCompaniesReport,
companies.CompactCompaniesReport,
companies.MostImportantCompanies],
"main_state": (None, None),
"report_state": (None, None),
}
#
# Inventory
#
combobox_type = ComboBoxField(gtk.ComboBox(), "type_")
combobox_type.set_options(*oks.ITEM_TYPES_DESC)
combobox_type.set_defaultValue(oks.TYPE_ITEMS_ALL)
combobox_type.clear()
entry_item = EntryField(gtk.Entry(), "name")
self.tables[TABLE_INVENTORY] = {
"window_title": "Inventário",
"columns": [
TextColumn("Item", 1, self.SORT, True),
TypeColumn("Tipo", 2, oks.ITEM_TYPES_DESC, self.SORT),
FloatColumn("Quantidade", 4, self.SORT),
CurrencyColumn("Valor (R$)", 5, self.SORT)],
"default_sort": (1, gtk.SORT_ASCENDING),
"search_widgets": [ ("Item: ", entry_item),
("Tipo: ", combobox_type) ],
"actions": [("_Editar", self.edit),
("_Remover", self.remove)],
"new_stock": "Novo item",
"reports": [inventory.InventoryReport,
inventory.MostImportantItems],
"main_state": (None, None),
"report_state": (None, None),
}
#
# Operations
#
column_status = CheckButtonColumn("Status", 6)
column_status.cellRenderer.connect("toggled",
self.on_operation_status_change)
combobox_type = ComboBoxField(gtk.ComboBox(), "type_")
combobox_type.set_options(*oks.OPERATION_TYPES_DESC)
combobox_type.set_defaultValue(oks.TYPE_OPERATIONS_ALL)
combobox_type.clear()
entryCompany = EntryField(gtk.Entry(), "company")
entry_item = EntryField(gtk.Entry(), "name")
entry_date = EntryDate(self, "date", True)
entry_date.set_value(None)
self.tables[TABLE_OPERATIONS] = {
"window_title": "Operações",
"columns": [
DateColumn("Data", 4, self.SORT),
TypeColumn("Tipo", 1, oks.OPERATION_TYPES_DESC, self.SORT),
TextColumn("Empresa", 2, self.SORT, ellipsize = True),
IntegerColumn("ID", 3, self.SORT),
column_status],
"default_sort": (4, gtk.SORT_DESCENDING),
"search_widgets": [ ("Empresa: ", entryCompany),
("Item: ", entry_item),
("Período: ", entry_date),
("Tipo: ", combobox_type) ],
"actions": [
("_Editar", self.edit),
("_Remover", self.remove),
("_Imprimir", self.print_action),
("Dar _acabamento", self.finishing),
("_Copiar operação", self.copy_operation),
("Re_solver requisitos", self.resolve_reqs)],
"new_stock": "Nova operação",
"completion": [(entryCompany, "companies:name"),
(entry_item, "inventory:item")],
"reports": [operations.OutgoingOperationsReport,
operations.IncomingOperationsReport,
operations.ProductionOperationsReport,
operations.ProductionCostReport,
operations.ProductionSalesReport],
"main_state": (None, None),
"report_state": (None, None),
}
#
# Transactions
#
column_status = CheckButtonColumn("Status", 7)
column_status.cellRenderer.connect("toggled",
self.on_transactionStatusChanged)
entryCompany = EntryField(gtk.Entry(), "company")
combobox_type = ComboBoxField(gtk.ComboBox(), "type_")
combobox_type.set_options(*oks.TRANSACTION_TYPES_DESC)
combobox_type.set_defaultValue(oks.TYPE_TRANSACTIONS_ALL)
combobox_type.clear()
comboboxStatus = ComboBoxField(gtk.ComboBox(), "status")
comboboxStatus.set_options(*oks.TRANSACTION_STATUS_DESC)
comboboxStatus.set_defaultValue(oks.TYPE_STATUS_ALL)
comboboxStatus.clear()
entry_date = EntryDate(self, "date", True)
entry_date.set_value(None)
self.tables[TABLE_TRANSACTIONS_VIEW] = {
"window_title": "Transações",
"columns": [DateColumn("Data", 2, self.SORT),
TypeColumn("Tipo", 1, oks.TRANSACTION_TYPES_DESC,
self.SORT, True),
TextColumn("Empresa", 3, self.SORT, True),
IntegerColumn("ID", 4, self.SORT),
CurrencyColumn("Valor (R$)", 6, self.SORT),
column_status],
"default_sort": (2, gtk.SORT_DESCENDING),
"search_widgets": [ ("Empresa: ", entryCompany),
("Período: ", entry_date),
("Status: ", comboboxStatus),
("Tipo: ", combobox_type) ],
"actions": [],
"new_stock": None,
"completion": [(entryCompany, "companies:name")],
"reports": [transactions.PayablesReport,
transactions.ReceivablesReport],
"main_state": (None, None),
"report_state": (None, None),
}
# Load the database
self.db_man = DatabaseManager(db_path)
self.db_man.open_db()
self.load_db(self.db_man.db)
#
# Database management
#
def load_db(self, db):
self.models = {}
self.reports = {}
self.table = None
self.selected = None
self.mode = None
self.db = db
self.services = self.db.services
# Load the main mode
self.set_mode(oks.MODE_MAIN)
def on_action_import_db_activate(self, action):
response = self.show_message("Importar base de dados?",
"Importar uma base de dados vai "\
"sobrescrever a base de dados atual. "\
"Deseja prosseguir?",
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_YES_NO)
if response != gtk.RESPONSE_YES:
return
dialog = gtk.FileChooserDialog("Importar")
file_filter = gtk.FileFilter()
file_filter.set_name("SQLite3 database")
file_filter.add_mime_type("application/x-sqlite3")
dialog.add_filter(file_filter)
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.add_button(gtk.STOCK_OPEN, gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.db_man.import_db(dialog.get_filename())
self.load_db(self.db_man.db)
dialog.hide()
dialog.destroy()
def on_action_export_db_activate(self, action):
dialog = gtk.FileChooserDialog("Exportar")
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.add_button(gtk.STOCK_SAVE, gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_SAVE)
dialog.set_do_overwrite_confirmation(True)
dialog.set_current_name("db-%s" % str(datetime.date.today()))
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.db_man.export_db(dialog.get_filename())
self.load_db(self.db_man.db)
dialog.hide()
dialog.destroy()
#
# Mode setting
#
def set_mode(self, mode):
self.save_state()
if mode == oks.MODE_MAIN:
self.toolbar.remove(self.toolbar_separator)
self.toolbar.insert(self.toolbar_separator, 5)
self.toolbutton_add.show()
self.action_view_reports.connect_proxy(self.toolbutton_switch_mode)
left = self.vbox_left_main
elif mode == oks.MODE_REPORT:
self.toolbar.remove(self.toolbar_separator)
self.toolbar.insert(self.toolbar_separator, 4)
self.toolbutton_add.hide()
self.action_view_main.connect_proxy(self.toolbutton_switch_mode)
left = self.vbox_left_reports
if self.current_left is not None:
self.hpaned.remove(self.current_left)
self.hpaned.pack1(left, True, False)
self.current_left = left
for child in self.toolbar.get_children():
child.set_is_important(True)
self.mode = mode
self.get_widget("radioaction_show_operations").toggled()
self.load_table(TABLE_OPERATIONS, True)
self.get_widget("radioaction_show_operations").activate()
self.set_statusbar_message()
def on_action_mode_activate(self, action):
if self.mode == oks.MODE_MAIN:
self.set_mode(oks.MODE_REPORT)
else:
self.set_mode(oks.MODE_MAIN)
#
# Tables
#
def save_state(self):
if self.mode == oks.MODE_MAIN:
model, iter_ = self.treeview.get_selection().get_selected()
path = None
if iter_ is not None:
path = model.get_path(iter_)
scroll = self.treeview.get_visible_rect()
self.tables[self.table]["main_state"] = (path, scroll)
elif self.mode == oks.MODE_REPORT:
report = self.combobox_report_type.get_value()
self.tables[self.table]["report_state"] = (report, None)
def load_table(self, table, force_reload=False):
if table == self.table and not force_reload:
return
if self.tables.has_key(table):
if table != self.table and self.table:
# Save the current state
self.save_state()
self.clear_view()
self.table = table
else:
raise KeyError, "The table %s is not supported." % table
# If we are in report mode, just set it and ignore the table setting
if self.mode == oks.MODE_REPORT:
# Clear the previous report description and options
self.label_report_description.set_text("")
clear_gtk_container(self.table_report_options)
self.window.set_title("Relatórios")
self.vbox_report_options_frame.hide_all()
reports = self.tables[table]["reports"]
reports = [(report.NAME, report) for report in reports]
self.combobox_report_type.set_options(*reports)
# Restore the previous state
report, scroll = self.tables[self.table]["report_state"]
if report is not None:
self.combobox_report_type.set_value(report)
self.hpaned.set_position(300)
return
# Window title
self.window.set_title(self.tables[table]["window_title"])
# Search
self.search_box.reset()
for (label, searchWidget) in self.tables[table]["search_widgets"]:
self.search_box.add_field(label, searchWidget)
# Actions
self.title.enable_actions(False)
self.actions_enabled = not self.tables[table]["actions"] == []
self.title.set_actions(*self.tables[table]["actions"])
# Treeview
if table not in self.models.keys():
self.model = self.db.models[table]
self.f_model = self.model.filter_new()
self.f_model.set_visible_func(self.visible_func)
self.sf_model = None
if self.SORT:
self.sf_model = gtk.TreeModelSort(self.f_model)
sort_col, sort_type = self.tables[table]["default_sort"]
if sort_col is not None and sort_type is not None:
self.sf_model.set_sort_column_id(sort_col, sort_type)
self.models[table] = (self.model, self.f_model, self.sf_model)
else:
self.model, self.f_model, self.sf_model = self.models[table]
for column in self.treeview.get_columns():
self.treeview.remove_column(column)
for column in self.tables[table]["columns"]:
self.treeview.append_column(column)
self.selected = None
self.reload_search(False)
if self.SORT:
model = self.sf_model
else:
model = self.f_model
self.treeview.set_model(model)
action_new_label = self.tables[table]["new_stock"]
if action_new_label:
self.action_new.set_sensitive(True)
self.action_new.set_property("label", action_new_label)
self.action_new.set_property("short_label", action_new_label)
else:
self.action_new.set_sensitive(False)
# Completion support
completion = self.services["completion"]
if self.tables[table].has_key("completion"):
for entry, completion_name in self.tables[table]["completion"]:
entry.set_completion(completion(completion_name))
# Restore the previous state
element, scroll = self.tables[self.table]["main_state"]
if element is not None:
self.treeview.get_selection().select_path(element)
self.on_treeview_cursor_changed()
else:
self.clear_view()
if scroll is not None:
# Add a timeout to prevent flickering
cb = lambda: self.treeview.scroll_to_point(scroll.x, scroll.y);\
False
gobject.timeout_add(250, cb)
self.hpaned.set_position(420)
self.search_box.grab_focus()
self.f_model.refilter() # Refilter, so that the sorting works fine
def on_radioaction_show_changed(self, widget, current):
tables = [TABLE_COMPANIES,
draws a resistor
parent: parent object
position: position [x,y]
value: string with resistor value. If it ends with 'ohm', 'OHM' or 'Ohm', proper Ohm symbol will be added. (Default 'R')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group)
inkDraw.line.relCoords(elem, [[15.5,0],[2,3],[3,-6],[3,6],[3,-6],[3,6],[3,-6],[2,3],[15.5,0]],position)
pos_text=[position[0]+25,position[1]-3-self.textOffset]
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
if flagVolt:
self.drawVoltArrow(group,[position[0]+25 ,position[1]+5],name=voltName,color=self.voltageColor,angleDeg=angleDeg,invertArrows=not invertArrows)
if flagCurr:
self.drawCurrArrow(group,[position[0]+40 ,position[1]-5],name=currName,color=self.currentColor,angleDeg=angleDeg,invertArrows=invertArrows)
return group;
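# Usage sketch for the resistor routine above (its def line falls outside this
# excerpt, so the name drawResistor is assumed; `root_layer` is a hypothetical
# pre-existing group):
#   self.drawResistor(root_layer, position=[0, 0], value='4k7 Ohm',
#                     angleDeg=90, voltName='v_R', currName='i_R')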
#---------------------------------------------
def drawPotentiometer(self,parent,position=[0, 0],value='R',label='Potentiometer',angleDeg=0,
flagVolt=True,voltName='v',flagCurr=True,currName='i',invertArrows=False,is3T=False):
""" draws a potentiometer
parent: parent object
position: position [x,y]
value: string with resistor value. If it ends with 'ohm', 'OHM' or 'Ohm', proper Ohm symbol will be added. (Default 'R')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
is3T: indicates whether the potentiometer has 3 terminals (default: False)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group)
# build arrow marker
colorBlack=inkDraw.color.defined('black')
L_arrow=2.5
markerPath = 'M 0,0 l -%f,%f l 0,-%f z'% (L_arrow*1.2, L_arrow/2.0,L_arrow)
markerArrow=inkDraw.marker.createMarker(self, 'BJTArrow', markerPath, RenameMode=1, strokeColor=colorBlack, fillColor=colorBlack,lineWidth=0.6,markerTransform='translate (1,0)')
lineStyleArrow = inkDraw.lineStyle.set(lineWidth=1, lineColor=colorBlack, markerEnd=markerArrow)
inkDraw.line.relCoords(elem, [[15.5,0],[2,3],[3,-6],[3,6],[3,-6],[3,6],[3,-6],[2,3],[15.5,0]],position)
# wiper arrow: vertical for the 3-terminal version, diagonal for the 2-terminal version
if is3T:
inkDraw.line.relCoords(elem, [[0,-10]],[position[0]+25,position[1]+15],lineStyle=lineStyleArrow)
pos_text=[position[0]+25,position[1]-3-self.textOffset]
else:
inkDraw.line.relCoords(elem, [[20,-12]],[position[0]+15,position[1]+6],lineStyle=lineStyleArrow)
pos_text=[position[0]+25,position[1]-6-self.textOffset]
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
if flagVolt:
if is3T:
pos=[position[0]+25 ,position[1]+5]
else:
pos=[position[0]+25 ,position[1]+8]
self.drawVoltArrow(group,pos,name=voltName,color=self.voltageColor,angleDeg=angleDeg,invertArrows=not invertArrows)
if flagCurr:
if is3T:
pos=[position[0]+40 ,position[1]-5]
else:
pos=[position[0]+42 ,position[1]-5]
self.drawCurrArrow(group,pos,name=currName,color=self.currentColor,angleDeg=angleDeg,invertArrows=invertArrows)
return group;
#---------------------------------------------
def drawCapacitor(self,parent,position=[0, 0],value='C',label='Capacitor',flagPol=False,angleDeg=0,
flagVolt=True,voltName='v',flagCurr=True,currName='i',invertArrows=False):
""" draws a capacitor
parent: parent object
position: position [x,y]
value: string with value.
label: label of the object (it can be repeated)
flagPol: draw sign for polarized capacitor
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
inkDraw.line.relCoords(elem, [[23,0]],position)
inkDraw.line.relCoords(elem, [[-23,0]],[position[0]+50,position[1]])
inkDraw.line.relCoords(elem, [[0,-14]],[position[0]+23,position[1]+7])
inkDraw.line.relCoords(elem, [[0,-14]],[position[0]+27,position[1]+7])
pos_text=[position[0]+25,position[1]-8-self.textOffset]
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if flagPol:
inkDraw.text.write(self,'+',[position[0]+31,position[1]-3],group,self.textStyle,fontSize=5)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
if flagVolt:
self.drawVoltArrow(group,[position[0]+25 ,position[1]+9],name=voltName,color=self.voltageColor,angleDeg=angleDeg,invertArrows=not invertArrows)
if flagCurr:
self.drawCurrArrow(group,[position[0]+40 ,position[1]-5],name=currName,color=self.currentColor,angleDeg=angleDeg,invertArrows=invertArrows)
return group;
#---------------------------------------------
def drawInductor(self,parent,position=[0, 0],value='L',label='Inductor',angleDeg=0,
flagVolt=True,voltName='v',flagCurr=True,currName='i',invertArrows=False):
""" draws an inductor
parent: parent object
position: position [x,y]
value: string with inductor value. (Default 'L')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
inkDraw.line.relCoords(elem, [[13,0]],position)
inkDraw.line.relCoords(elem, [[-13,0]],[position[0]+50,position[1]])
inkDraw.arc.centerAngStartAngEnd(elem,[position[0]+16,position[1]], 3.0,0.0,180.0,[0,0],flagOpen=True,largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem,[position[0]+22,position[1]], 3.0,0.0,180.0,[0,0],flagOpen=True,largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem,[position[0]+28,position[1]], 3.0,0.0,180.0,[0,0],flagOpen=True,largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem,[position[0]+34,position[1]], 3.0,0.0,180.0,[0,0],flagOpen=True,largeArc=False)
pos_text=[position[0]+25,position[1]-self.textOffset]
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
if flagVolt:
self.drawVoltArrow(group,[position[0]+25 ,position[1]+5],name=voltName,color=self.voltageColor,angleDeg=angleDeg,invertArrows=not invertArrows)
if flagCurr:
self.drawCurrArrow(group,[position[0]+40 ,position[1]-5],name=currName,color=self.currentColor,angleDeg=angleDeg,invertArrows=invertArrows)
return group;
#---------------------------------------------
def drawDiode(self,parent,position=[0, 0],value='D',label='diode',angleDeg=0,
flagVolt=True,voltName='v',flagCurr=True,currName='i',invertArrows=False,flagType='regular',mirror=False):
""" draws a diode
parent: parent object
position: position [x,y]
value: string with the diode value. (default 'D')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)]
flagType: type of element. Available types: 'regular', 'LED', 'photoDiode', 'zener', 'schottky', 'tunnel', 'varicap' (default: 'regular')
mirror: mirror diode (default: False)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group)
if mirror:
if flagType == 'varicap':
inkDraw.line.relCoords(elem, [[16,0]],position)
else:
inkDraw.line.relCoords(elem, [[19,0]],position)
inkDraw.line.relCoords(elem, [[12,6],[0,-12],[-12,6]],[position[0]+19,position[1]])
inkDraw.line.relCoords(elem, [[19,0]],[position[0]+31,position[1]])
if flagType in ['regular','LED','photoDiode']:
inkDraw.line.relCoords(elem, [[0,12]],[position[0]+19,position[1]-6])
if flagType == 'zener':
inkDraw.line.relCoords(elem, [[-2,-2],[0,-10],[-2,-2]],[position[0]+19+2,position[1]+5+2])
if flagType == 'schottky':
inkDraw.line.relCoords(elem, [[0,2],[3,0],[0,-12],[3,0],[0,2]],[position[0]+19-3,position[1]+6-2])
if flagType == 'tunnel':
if mirror:
inkDraw.line.relCoords(elem, [[-3,0],[0,-12],[3,0]],[position[0]+19+3,position[1]+6])
else:
inkDraw.line.relCoords(elem, [[3,0],[0,-12],[-3,0]],[position[0]+19-3,position[1]+6])
if flagType == 'varicap':
inkDraw.line.relCoords(elem, [[0,12]],[position[0]+19,position[1]-6])
if mirror:
inkDraw.line.relCoords(elem, [[0,12]],[position[0]+16,position[1]-6])
else:
inkDraw.line.relCoords(elem, [[0,12]],[position[0]+22,position[1]-6])
else:
inkDraw.line.relCoords(elem, [[19,0]],position)
inkDraw.line.relCoords(elem, [[-12,6],[0,-12],[12,6]],[position[0]+31,position[1]])
if flagType == 'varicap':
inkDraw.line.relCoords(elem, [[16,0]],[position[0]+31+3,position[1]])
else:
inkDraw.line.relCoords(elem, [[19,0]],[position[0]+31,position[1]])
if flagType in ['regular','LED','photoDiode']:
inkDraw.line.relCoords(elem, [[0,12]],[position[0]+31,position[1]-6])
if flagType == 'zener':
inkDraw.line.relCoords(elem, [[-2,-2],[0,-10],[-2,-2]],[position[0]+31+2,position[1]+5+2])
if flagType == 'schottky':
inkDraw.line.relCoords(elem, [[0,2],[3,0],[0,-12],[3,0],[0,2]],[position[0]+31-3,position[1]+6-2])
if flagType == 'tunnel':
inkDraw.line.relCoords(elem, [[3,0],[0,-12],[-3,0]],[position[0]+31-3,position[1]+6])
if flagType == 'varicap':
inkDraw.line.relCoords(elem, [[0,12]],[position[0]+31,position[1]-6])
inkDraw.line.relCoords(elem, [[0,12]],[position[0]+34,position[1]-6])
if value!=None:
if flagType=='LED':
pos_text=[position[0]+25,position[1]-13-self.textOffset]
if flagType=='photoDiode':
pos_text=[position[0]+25,position[1]-13-self.textOffset]
if flagType in ['regular','zener','schottky','tunnel','varicap']:
pos_text=[position[0]+25,position[1]-6-self.textOffset]
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
if flagVolt:
self.drawVoltArrow(group,[position[0]+25 ,position[1]+7],name=voltName,color=self.voltageColor,angleDeg=angleDeg,invertArrows= not (invertArrows != mirror ))
if flagCurr:
self.drawCurrArrow(group,[position[0]+40 ,position[1]-5],name=currName,color=self.currentColor,angleDeg=angleDeg,invertArrows=(invertArrows != mirror ))
if flagType=='LED':
arrow = self.createGroup(elem)
inkDraw.line.relCoords(arrow, [[7,0]],position)
inkDraw.line.relCoords(arrow, [[1.5,-1.5],[-1.5,-1.5]],[position[0]+5.5,position[1]+1.5])
self.rotateElement(arrow,position,60)
self.moveElement(arrow,[22,-8])
arrow = self.createGroup(elem)
inkDraw.line.relCoords(arrow, [[7,0]],position)
inkDraw.line.relCoords(arrow, [[1.5,-1.5],[-1.5,-1.5]],[position[0]+5.5,position[1]+1.5])
self.rotateElement(arrow,position,60)
self.moveElement(arrow,[27,-6])
if flagType=='photoDiode':
arrow = self.createGroup(elem)
inkDraw.line.relCoords(arrow, [[7,0]],position)
inkDraw.line.relCoords(arrow, [[1.5,-1.5],[-1.5,-1.5]],[position[0]+5.5,position[1]+1.5])
self.rotateElement(arrow,position,-120)
self.moveElement(arrow,[25,-14])
arrow = self.createGroup(elem)
inkDraw.line.relCoords(arrow, [[7,0]],position)
inkDraw.line.relCoords(arrow, [[1.5,-1.5],[-1.5,-1.5]],[position[0]+5.5,position[1]+1.5])
self.rotateElement(arrow,position,-120)
self.moveElement(arrow,[30,-12])
return group;
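# Usage sketch (hedged): `some_layer` is a hypothetical pre-existing group;
# parameters follow the docstring above.
#   self.drawDiode(some_layer, position=[0, 0], value='D_1',
#                  flagType='zener', flagVolt=False, flagCurr=False)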
#---------------------------------------------
def drawSourceV(self,parent,position=[0, 0],value='v(t)',label='Source',angleDeg=0,
flagVolt=True,flagCurr=True,currName='i',invertArrows=False,mirror=False):
""" draws a independend general voltage source
parent: parent object
position: position [x,y]
value: string with value.
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
mirror: mirror source drawing (default: False)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
inkDraw.line.relCoords(elem, [[18,0]],position)
inkDraw.line.relCoords(elem, [[-18,0]],[position[0]+50,position[1]])
inkDraw.circle.centerRadius(elem, [25,0],7.0,offset=position, label='circle')
#signs
lineStyleSign=inkDraw.lineStyle.setSimpleBlack(lineWidth=0.6)
if mirror:
inkDraw.line.relCoords(elem, [[-2,0]],[position[0]+22,position[1]],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+21,position[1]-1],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+29,position[1]-1],lineStyle=lineStyleSign)
else:
inkDraw.line.relCoords(elem, [[-2,0]],[position[0]+30,position[1]],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+29,position[1]-1],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+21,position[1]-1],lineStyle=lineStyleSign)
pos_text=[position[0]+25,position[1]-8-self.textOffset]
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
if flagVolt:
self.drawVoltArrow(group,[position[0]+25 ,position[1]+8],name=value,color=self.voltageColor,angleDeg=angleDeg,invertArrows=not mirror)
if flagCurr:
self.drawCurrArrow(group,[position[0]+40 ,position[1]-5],name=currName,color=self.currentColor,angleDeg=angleDeg,invertArrows=(invertArrows== mirror))
return group;
#---------------------------------------------
def drawSourceVSinusoidal(self,parent,position=[0, 0],value='v(t)',label='Source',angleDeg=0,
flagVolt=True,flagCurr=True,currName='i',invertArrows=False,mirror=False):
""" draws a independend sinusoidal voltage source
parent: parent object
position: position [x,y]
value: string with value.
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
mirror: mirror source drawing (default: False)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
inkDraw.line.relCoords(elem, [[18,0]],position)
inkDraw.line.relCoords(elem, [[-18,0]],[position[0]+50,position[1]])
inkDraw.circle.centerRadius(elem, [25,0],7.0,offset=position, label='circle')
#signs
sine = self.createGroup(elem)
lineStyleSign=inkDraw.lineStyle.setSimpleBlack(lineWidth=0.6)
inkDraw.arc.startEndRadius(sine,[position[0]+20,position[1]], [position[0]+25,position[1]], 2.6, [0,0], lineStyle=lineStyleSign,flagRightOf=True,flagOpen=True,largeArc=False)
inkDraw.arc.startEndRadius(sine,[position[0]+30,position[1]], [position[0]+25,position[1]], 2.6, [0,0], lineStyle=lineStyleSign,flagRightOf=True,flagOpen=True,largeArc=False)
self.rotateElement(sine,[position[0]+25,position[1]],-angleDeg)
if mirror:
inkDraw.line.relCoords(elem, [[-2,0]],[position[0]+16,position[1]-4],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+15,position[1]-5],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+35,position[1]-5],lineStyle=lineStyleSign)
else:
inkDraw.line.relCoords(elem, [[-2,0]],[position[0]+36,position[1]-4],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+35,position[1]-5],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+15,position[1]-5],lineStyle=lineStyleSign)
pos_text=[position[0]+25,position[1]-8-self.textOffset]
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
if flagVolt:
self.drawVoltArrow(group,[position[0]+25 ,position[1]+8],name=value,color=self.voltageColor,angleDeg=angleDeg,invertArrows=not mirror)
if flagCurr:
self.drawCurrArrow(group,[position[0]+40 ,position[1]-5],name=currName,color=self.currentColor,angleDeg=angleDeg,invertArrows=(invertArrows== mirror))
return group;
#---------------------------------------------
def drawSourceVDC(self,parent,position=[0, 0],value='V',label='Source',angleDeg=0,
flagVolt=True,flagCurr=True,currName='i',invertArrows=False,mirror=False):
""" draws a DC voltage source
parent: parent object
position: position [x,y]
value: string with value.
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: output current drop name (default: i)
mirror: mirror source drawing (default: False)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
inkDraw.line.relCoords(elem, [[24,0]],position)
inkDraw.line.relCoords(elem, [[-23,0]],[position[0]+50,position[1]])
#draw source
lineStyleSign=inkDraw.lineStyle.setSimpleBlack(lineWidth=0.6)
if mirror:
inkDraw.line.relCoords(elem, [[-2,0]],[position[0]+21,position[1]-4],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+20,position[1]-5],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+30,position[1]-5],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,-6]],[position[0]+27,position[1]+3])
inkDraw.line.relCoords(elem, [[0,-14]],[position[0]+24,position[1]+7])
else:
inkDraw.line.relCoords(elem, [[-2,0]],[position[0]+31,position[1]-4],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+30,position[1]-5],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,2]],[position[0]+21,position[1]-5],lineStyle=lineStyleSign)
inkDraw.line.relCoords(elem, [[0,-6]],[position[0]+24,position[1]+3])
inkDraw.line.relCoords(elem, [[0,-14]],[position[0]+27,position[1]+7])
# Engine/Extras/Maya_AnimationRiggingTools/MayaTools/General/Scripts/perforceUtils.py
import maya.cmds as cmds
from P4 import P4,P4Exception
import os, cPickle
from functools import partial
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_getLatestRevision(fileName, *args):
fileArg = fileName
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
if fileName == None:
fileName = cmds.file(q = True, sceneName = True)
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#try to get the current file revision on local, and compare to depot
try:
#Find out the revision of the local version of the file
myFile = p4.run_have(fileName)[0]
#This will find the revision number of your local file.
localRevision = int(myFile['haveRev'])
#find out the revision number of the depot version of the file
depotVersion = p4.run_files(myFile['depotFile'])[0]
#find the depot file path
depotFile = depotVersion['depotFile']
#find the depot revision number of the file
depotRevision = int(depotVersion['rev'])
#check for latest
if localRevision != depotRevision:
syncFiles.append(depotFile)
#Check for scene references in the file
allRefs = []
references = cmds.file(q = True, reference = True)
for reference in references:
nestedRef = cmds.file(reference, q = True, reference = True)
allRefs.append(reference)
allRefs.append(nestedRef)
#loop through all found references and check for latest
for ref in allRefs:
#get revision of local file
myFile = p4.run_have(ref)[0]
#get revision number
localRefRevision = int(myFile['haveRev'])
#grab depot file info
depotRefVersion = p4.run_files(myFile['depotFile'])[0]
#depot file path
depotFile = depotRefVersion['depotFile']
#get depot's revision #
depotRefRevision = int(depotRefVersion['rev'])
#compare
if localRefRevision != depotRefRevision:
syncFiles.append(depotFile)
#if there are files to sync, do it now
if len(syncFiles) > 0:
message = "The following files are not at latest revision:\n\n"
for file in syncFiles:
message += file + "\n"
result = cmds.confirmDialog(title = "Perforce", icon = "warning", message = message, button = ["Sync", "Cancel"])
if result == "Sync":
#sync files
for f in syncFiles:
p4.run_sync(f)
#ask if user would like to reopen
if fileArg == None:
result = cmds.confirmDialog(title = "Perforce", icon = "question", message = "Sync Complete. Reopen file to get changes?", button = ["Yes", "Cancel"])
if result == "Yes":
cmds.file(fileName, open = True, force = True)
else:
cmds.confirmDialog(title = "Perforce", icon = "information", message = "This file is already at head revision.", button = "Close")
#disconnect from server
p4.disconnect()
#Handle any p4 errors that come back from trying to run the above code
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return
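# Usage sketch: passing None makes the function operate on the scene that is
# currently open in Maya, e.g. p4_getLatestRevision(None); calling it with an
# explicit file path skips the "reopen file" prompt at the end.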
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_checkOutCurrentFile(fileName, *args):
fileArg = fileName
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return False
#find currently opened file name
if fileName == None:
fileName = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
try:
#check to see if file is at head revision
myFile = p4.run_have(fileName)[0]
#This will find the revision number of your local file.
localRevision = int(myFile['haveRev'])
#find out the revision number of the depot version of the file
depotVersion = p4.run_files(myFile['depotFile'])[0]
#find the depot file path
depotFile = depotVersion['depotFile']
#find the depot revision number of the file
depotRevision = int(depotVersion['rev'])
#check for latest
if localRevision != depotRevision:
result = cmds.confirmDialog(title = "Perforce", icon = "warning", message = "This file is not at head revision. Please get latest and try again.", button = ["Get Latest", "Cancel"])
if result == "Get Latest":
p4_getLatestRevision(fileArg)
p4.disconnect()
else:
return False
else:
try:
#check to see if file is checked out
opened = p4.run_opened(depotFile)
if len(opened) > 0:
user = opened[0]['user']
cmds.confirmDialog(title = "Perforce", icon = "warning", message = "This file is already checked out by: " + user, button = "Close")
p4.disconnect()
else:
#check out the file
p4.run_edit(depotFile)
cmds.confirmDialog(title = "Perfoce", icon = "information", message = "This file is now checked out.", button = "Close")
p4.disconnect()
#tools path
toolsPath = cmds.internalVar(usd = True) + "mayaTools.txt"
if os.path.exists(toolsPath):
f = open(toolsPath, 'r')
mayaToolsDir = f.readline()
f.close()
return True
#Handle any p4 errors that come back from trying to run the above code
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
return False
#Handle any p4 errors that come back from trying to run the above code
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return False
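# Usage sketch (hedged): the boolean return value lets callers gate a save on
# a successful checkout, e.g.
#   if p4_checkOutCurrentFile(None):
#       cmds.file(save = True)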
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_getRevisionHistory(*args):
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
clientFile = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
#get revision history of current file
try:
#check to see if file is at head revision
myFile = p4.run_have(clientFile)[0]
depotVersion = p4.run_files(myFile['depotFile'])[0]
depotFile = depotVersion['depotFile']
history = p4.run_changes(depotFile)
info = ""
for h in history:
user = h.get("user")
change = h.get("change")
desc = h.get("desc")
if desc.find("\n") == -1:
desc = desc + "...\n"
else:
desc = desc.partition("\n")[0] + "...\n"
info += change + " by " + user + ": " + desc
#print report into a confirm dialog
cmds.confirmDialog(title = "History", icon = "information", ma = "left", message = info, button = "Close")
p4.disconnect()
except P4Exception:
errorString = "The following errors were encountered:\n\n"
for e in p4.errors:
errorString += e + "\n"
cmds.confirmDialog(title = "Perforce", icon = "critical", message = errorString)
p4.disconnect()
return
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def p4_submitCurrentFile(fileName, desc, *args):
fileArg = fileName
#try to connect
p4 = P4()
try:
p4.connect()
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to connect to perforce server.")
return
#find currently opened file name
if fileName == None:
fileName = cmds.file(q = True, sceneName = True)
reopen = False
syncFiles = []
try:
#client info
spec = p4.run( "client", "-o" )[0]
client = spec.get("Client")
owner = spec.get("Owner")
p4.user = owner
p4.client = client
except:
cmds.confirmDialog(title = "Perforce", icon = "critical", message = "Unable to obtain client spec information.")
# -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2012-2019 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import math
import mmap
import os
import re
import struct
import threading
import typing
import chess
from types import TracebackType
from typing import Dict, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Type, Union
PathLike = Union[str, bytes]
UINT64_BE = struct.Struct(">Q")
UINT32 = struct.Struct("<I")
UINT32_BE = struct.Struct(">I")
UINT16 = struct.Struct("<H")
TBPIECES = 7
TRIANGLE = [
6, 0, 1, 2, 2, 1, 0, 6,
0, 7, 3, 4, 4, 3, 7, 0,
1, 3, 8, 5, 5, 8, 3, 1,
2, 4, 5, 9, 9, 5, 4, 2,
2, 4, 5, 9, 9, 5, 4, 2,
1, 3, 8, 5, 5, 8, 3, 1,
0, 7, 3, 4, 4, 3, 7, 0,
6, 0, 1, 2, 2, 1, 0, 6,
]
INVTRIANGLE = [1, 2, 3, 10, 11, 19, 0, 9, 18, 27]
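# Relation between the two tables above: TRIANGLE[INVTRIANGLE[i]] == i for every
# i in range(10), i.e. INVTRIANGLE picks one canonical square for each of the 10
# values produced by TRIANGLE (class 9, for example, maps to square 27, d4).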
def offdiag(square: chess.Square) -> int:
return chess.square_rank(square) - chess.square_file(square)
def flipdiag(square: chess.Square) -> chess.Square:
return ((square >> 3) | (square << 3)) & 63
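# flipdiag mirrors a square across the a1-h8 diagonal by swapping its 3-bit rank
# and file fields, e.g. flipdiag(chess.B1) == chess.A2, while offdiag is zero
# exactly on that diagonal (positive above it, negative below it).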
LOWER = [
28, 0, 1, 2, 3, 4, 5, 6,
0, 29, 7, 8, 9, 10, 11, 12,
1, 7, 30, 13, 14, 15, 16, 17,
2, 8, 13, 31, 18, 19, 20, 21,
3, 9, 14, 18, 32, 22, 23, 24,
4, 10, 15, 19, 22, 33, 25, 26,
5, 11, 16, 20, 23, 25, 34, 27,
6, 12, 17, 21, 24, 26, 27, 35,
]
DIAG = [
0, 0, 0, 0, 0, 0, 0, 8,
0, 1, 0, 0, 0, 0, 9, 0,
0, 0, 2, 0, 0, 10, 0, 0,
0, 0, 0, 3, 11, 0, 0, 0,
0, 0, 0, 12, 4, 0, 0, 0,
0, 0, 13, 0, 0, 5, 0, 0,
0, 14, 0, 0, 0, 0, 6, 0,
15, 0, 0, 0, 0, 0, 0, 7,
]
FLAP = [
0, 0, 0, 0, 0, 0, 0, 0,
0, 6, 12, 18, 18, 12, 6, 0,
1, 7, 13, 19, 19, 13, 7, 1,
2, 8, 14, 20, 20, 14, 8, 2,
3, 9, 15, 21, 21, 15, 9, 3,
4, 10, 16, 22, 22, 16, 10, 4,
5, 11, 17, 23, 23, 17, 11, 5,
0, 0, 0, 0, 0, 0, 0, 0,
]
PTWIST = [
0, 0, 0, 0, 0, 0, 0, 0,
47, 35, 23, 11, 10, 22, 34, 46,
45, 33, 21, 9, 8, 20, 32, 44,
43, 31, 19, 7, 6, 18, 30, 42,
41, 29, 17, 5, 4, 16, 28, 40,
39, 27, 15, 3, 2, 14, 26, 38,
37, 25, 13, 1, 0, 12, 24, 36,
0, 0, 0, 0, 0, 0, 0, 0,
]
INVFLAP = [
8, 16, 24, 32, 40, 48,
9, 17, 25, 33, 41, 49,
10, 18, 26, 34, 42, 50,
11, 19, 27, 35, 43, 51,
]
FILE_TO_FILE = [0, 1, 2, 3, 3, 2, 1, 0]
KK_IDX = [[
-1, -1, -1, 0, 1, 2, 3, 4,
-1, -1, -1, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57,
], [
58, -1, -1, -1, 59, 60, 61, 62,
63, -1, -1, -1, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107,
108, 109, 110, 111, 112, 113, 114, 115,
], [
116, 117, -1, -1, -1, 118, 119, 120,
121, 122, -1, -1, -1, 123, 124, 125,
126, 127, 128, 129, 130, 131, 132, 133,
134, 135, 136, 137, 138, 139, 140, 141,
142, 143, 144, 145, 146, 147, 148, 149,
150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161, 162, 163, 164, 165,
166, 167, 168, 169, 170, 171, 172, 173,
], [
174, -1, -1, -1, 175, 176, 177, 178,
179, -1, -1, -1, 180, 181, 182, 183,
184, -1, -1, -1, 185, 186, 187, 188,
189, 190, 191, 192, 193, 194, 195, 196,
197, 198, 199, 200, 201, 202, 203, 204,
205, 206, 207, 208, 209, 210, 211, 212,
213, 214, 215, 216, 217, 218, 219, 220,
221, 222, 223, 224, 225, 226, 227, 228,
], [
229, 230, -1, -1, -1, 231, 232, 233,
234, 235, -1, -1, -1, 236, 237, 238,
239, 240, -1, -1, -1, 241, 242, 243,
244, 245, 246, 247, 248, 249, 250, 251,
252, 253, 254, 255, 256, 257, 258, 259,
260, 261, 262, 263, 264, 265, 266, 267,
268, 269, 270, 271, 272, 273, 274, 275,
276, 277, 278, 279, 280, 281, 282, 283,
], [
284, 285, 286, 287, 288, 289, 290, 291,
292, 293, -1, -1, -1, 294, 295, 296,
297, 298, -1, -1, -1, 299, 300, 301,
302, 303, -1, -1, -1, 304, 305, 306,
307, 308, 309, 310, 311, 312, 313, 314,
315, 316, 317, 318, 319, 320, 321, 322,
323, 324, 325, 326, 327, 328, 329, 330,
331, 332, 333, 334, 335, 336, 337, 338,
], [
-1, -1, 339, 340, 341, 342, 343, 344,
-1, -1, 345, 346, 347, 348, 349, 350,
-1, -1, 441, 351, 352, 353, 354, 355,
-1, -1, -1, 442, 356, 357, 358, 359,
-1, -1, -1, -1, 443, 360, 361, 362,
-1, -1, -1, -1, -1, 444, 363, 364,
-1, -1, -1, -1, -1, -1, 445, 365,
-1, -1, -1, -1, -1, -1, -1, 446,
], [
-1, -1, -1, 366, 367, 368, 369, 370,
-1, -1, -1, 371, 372, 373, 374, 375,
-1, -1, -1, 376, 377, 378, 379, 380,
-1, -1, -1, 447, 381, 382, 383, 384,
-1, -1, -1, -1, 448, 385, 386, 387,
-1, -1, -1, -1, -1, 449, 388, 389,
-1, -1, -1, -1, -1, -1, 450, 390,
-1, -1, -1, -1, -1, -1, -1, 451,
], [
452, 391, 392, 393, 394, 395, 396, 397,
-1, -1, -1, -1, 398, 399, 400, 401,
-1, -1, -1, -1, 402, 403, 404, 405,
-1, -1, -1, -1, 406, 407, 408, 409,
-1, -1, -1, -1, 453, 410, 411, 412,
-1, -1, -1, -1, -1, 454, 413, 414,
-1, -1, -1, -1, -1, -1, 455, 415,
-1, -1, -1, -1, -1, -1, -1, 456,
], [
457, 416, 417, 418, 419, 420, 421, 422,
-1, 458, 423, 424, 425, 426, 427, 428,
-1, -1, -1, -1, -1, 429, 430, 431,
-1, -1, -1, -1, -1, 432, 433, 434,
-1, -1, -1, -1, -1, 435, 436, 437,
-1, -1, -1, -1, -1, 459, 438, 439,
-1, -1, -1, -1, -1, -1, 460, 440,
-1, -1, -1, -1, -1, -1, -1, 461,
]]
PP_IDX = [[
0, -1, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46,
-1, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61,
], [
62, -1, -1, 63, 64, 65, -1, 66,
    -1, 67, 68,
from logging import Logger
from os.path import join
from typing import Any, Dict, List, Optional, Union
import requests
import pandas as pd
from requests.exceptions import HTTPError
from .commons import figshare_stem, figshare_group
from .delta import Delta
from redata.commons.logger import log_stdout
# Administrative groups
from .manual_override import ManualOverride, update_entries
superadmins = figshare_group('GrouperSuperAdmins', '', production=True)
admins = figshare_group('GrouperAdmins', '', production=True)
managers = figshare_group('GrouperManagers', '', production=True)
class Grouper:
"""
This class uses the Grouper API to retrieve and send metadata
See `Main Grouper API documentation
<https://spaces.at.internet2.edu/display/Grouper/Grouper+Web+Services>`_.
:param grouper_host: Grouper hostname (e.g., grouper.iam.arizona.edu)
:param grouper_base_path: Grouper base path that includes the API version
(e.g., grouper-ws/servicesRest/json/v2_2_001)
:param grouper_user: Grouper username
    :param grouper_password: Grouper password
:param grouper_production: Bool to use production stem, ``figshare``.
        Otherwise stage stem is used, ``figtest``. Default: stage (``False``)
:ivar grouper_host: Grouper hostname
:ivar grouper_base_path: Grouper base path that includes the API version
:ivar grouper_user: Grouper username
    :ivar grouper_password: Grouper password
:ivar grouper_production: Bool to use production stem, ``figshare``.
Otherwise stage stem is used, ``figtest``
:ivar tuple grouper_auth: Grouper credential
:ivar str endpoint: Grouper endpoint
:ivar dict headers: HTTPS header information
"""
def __init__(self, grouper_host: str, grouper_base_path: str,
grouper_user: str, grouper_password: str,
grouper_production: bool = False,
log: Optional[Logger] = None):
if isinstance(log, type(None)):
self.log = log_stdout()
else:
self.log = log
self.grouper_host = grouper_host
self.grouper_base_path = grouper_base_path
self.grouper_user = grouper_user
        self.grouper_password = grouper_password
self.grouper_production = grouper_production
self.grouper_auth: tuple = (self.grouper_user, self.grouper_password)
self.endpoint: str = f'https://{grouper_host}/{grouper_base_path}'
self.headers: dict = {'Content-Type': 'text/x-json'}
def url(self, endpoint: str) -> str:
"""
Return full Grouper URL endpoint
:param endpoint: The URL endpoint to append to ``self.endpoint``
:return: Complete HTTPS URL
"""
return join(self.endpoint, endpoint)
def query(self, group: str) -> Dict[str, Any]:
"""
Query Grouper for list of members in a group.
:param group: Grouper full group path from
:func:`requiam.commons.figshare_group`
:return: Grouper metadata
"""
endpoint = self.url(f"groups/{group}/members")
rsp = requests.get(endpoint, auth=self.grouper_auth)
grouper_query_dict = vars(self)
# Append query specifics
grouper_query_dict['grouper_members_url'] = endpoint
grouper_query_dict['grouper_group'] = group
if 'wsSubjects' in rsp.json()['WsGetMembersLiteResult']:
grouper_query_dict['members'] = \
{s['id'] for s in rsp.json()['WsGetMembersLiteResult']['wsSubjects']}
else:
grouper_query_dict['members'] = set([])
return grouper_query_dict
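    # Illustrative sketch of a query call; the hostname, base path, and
    # credentials below are hypothetical placeholders, not real endpoints:
    #
    #   ga = Grouper("grouper.example.edu",
    #                "grouper-ws/servicesRest/json/v2_2_001",
    #                "ws_user", "ws_password", grouper_production=False)
    #   members = ga.query("arizona.edu:Dept:LBRY:figtest:test")["members"]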
def get_group_list(self, group_type: str) -> Any:
"""
Retrieve list of groups in a Grouper stem
See `Grouper API "Get Groups"
<https://spaces.at.internet2.edu/display/Grouper/Get+Groups>`_
but with a different implementation using FIND_BY_STEM_NAME method
:param group_type: Grouper stem.
Options are: 'portal', 'quota', 'test', 'group_active', ''.
Note: Some groups (e.g., 'group_active') do not exist for production
:raises ValueError: If incorrect ``group_type``
:return: JSON response
"""
if group_type not in ['portal', 'quota', 'test', 'group_active', '']:
raise ValueError("Incorrect [group_type] input")
endpoint = self.url('groups')
grouper_stem = figshare_stem(group_type,
production=self.grouper_production)
params = dict()
params['WsRestFindGroupsRequest'] = {
'wsQueryFilter':
{'queryFilterType': 'FIND_BY_STEM_NAME',
'stemName': grouper_stem}
}
rsp = requests.post(endpoint, json=params, headers=self.headers,
auth=self.grouper_auth)
return rsp.json()
def get_group_details(self, group: str) -> Any:
"""
Retrieve group details
See `Grouper API "Get Groups"
<https://spaces.at.internet2.edu/display/Grouper/Get+Groups>`_
but using WsRestFindGroupsRequest
:param group: Grouper path from :func:`requiam.commons.figshare_group`
:return: JSON response
"""
endpoint = self.url('groups')
params = dict()
params['WsRestFindGroupsRequest'] = {
'wsQueryFilter':
{'queryFilterType': 'FIND_BY_GROUP_NAME_APPROXIMATE',
'groupName': group}
}
rsp = requests.post(endpoint, json=params, headers=self.headers,
auth=self.grouper_auth)
return rsp.json()['WsFindGroupsResults']['groupResults']
def check_group_exists(self, group: str, group_type: str) -> bool:
"""
Check whether a Grouper group exists within a Grouper stem
See `Grouper API "Find Groups"
<https://spaces.at.internet2.edu/display/Grouper/Find+Groups>`_
:param group: Grouper full group path from
:func:`requiam.commons.figshare_group`
:param group_type: Grouper stem.
Options are: 'portal', 'quota', 'test', 'group_active', ''
:raises ValueError: If incorrect ``group_type``
        :raises KeyError: Stem does not exist
"""
if group_type not in ['portal', 'quota', 'test', 'group_active', '']:
raise ValueError("Incorrect [group_type] input")
result = self.get_group_list(group_type)
try:
group_df = pd.DataFrame(result['WsFindGroupsResults']['groupResults'])
df_query = group_df.loc[group_df['displayExtension'] == str(group)]
status = True if not df_query.empty else False
return status
except KeyError:
raise KeyError("Stem is empty")
def add_group(self, group: str, group_type: str, description: str) \
-> bool:
"""
Create Grouper group within a Grouper stem
See `Grouper API "Group Save"
<https://spaces.at.internet2.edu/display/Grouper/Group+Save>`_
:param group: Grouper full group path from
:func:`requiam.commons.figshare_group`
:param group_type: Grouper stem from
:func:`requiam.commons.figshare_stem`.
Options are: 'portal', 'quota', 'test', 'group_active', ''
:param description: Description of group to include as metadata.
This shows up in the Grouper UI
:raises ValueError: If incorrect ``group_type``
:raises HTTPError: If the Grouper POST fails with a non-200 status
"""
endpoint = self.url("groups")
if group_type not in ['portal', 'quota', 'test', 'group_active']:
raise ValueError("Incorrect [group_type] input")
grouper_name = figshare_group(group, group_type,
production=self.grouper_production)
params = dict()
params['WsRestGroupSaveRequest'] = {
'wsGroupToSaves': [
{'wsGroup': {'description': description,
'displayExtension': group,
'name': grouper_name},
'wsGroupLookup': {'groupName': grouper_name}}
]
}
try:
result = requests.post(endpoint, json=params, headers=self.headers,
auth=self.grouper_auth)
metadata = result.json()['WsGroupSaveResults']['resultMetadata']
if metadata['resultCode'] == 'SUCCESS':
return True
            else:
                errmsg = f"add_group - Error: {metadata['resultCode']}"
                raise HTTPError(errmsg)
        except requests.exceptions.HTTPError:
            raise
def add_privilege(self,
access_group: str,
target_group: str,
target_group_type: str,
privileges: Union[str, List[str]]) -> bool:
"""
Add privilege(s) for a Grouper group to access target
See `Grouper API "Add or remove Grouper privileges"
<https://spaces.at.internet2.edu/display/Grouper/Add+or+remove+grouper+privileges>`_
:param access_group: Grouper group to give access to,
ex: arizona.edu:Dept:LBRY:figshare:GrouperSuperAdmins
:param target_group: Grouper group to add privilege on, ex: "apitest"
:param target_group_type: Grouper stem associated with the group to
add privilege on, ex: use 'figtest' for
'arizona.edu:Dept:LBRY:figtest:test'
:param privileges: Grouper privileges. Allowed values:
'read', 'view', 'update', 'admin', 'optin', 'optout'
:raises ValueError: Incorrect ``privileges`` or Grouper POST failed
:raises KeyError: Incorrect ``target_group_type``
:raises Exception: Incorrect ``access_group`` (check for existence)
:return: True on success, otherwise raises an Exception
"""
endpoint = self.url('grouperPrivileges')
# Check privileges
if isinstance(privileges, str):
privileges = [privileges]
for privilege in privileges:
if privilege not in ['read', 'view', 'update', 'admin', 'optin', 'optout']:
raise ValueError(f"Invalid privilege name: {privilege}")
target_groupname = figshare_group(target_group, target_group_type,
production=self.grouper_production)
try:
group_exists = self.check_group_exists(target_group, target_group_type)
except KeyError:
raise KeyError("ERROR: Stem is empty")
if group_exists:
args = self.get_group_details(access_group)
if len(args):
access_group_detail = args.pop()
else:
raise Exception(f"Could NOT find access_group: {access_group}")
# initialize
params = dict()
params['WsRestAssignGrouperPrivilegesLiteRequest'] = {
'allowed': 'T',
'subjectId': access_group_detail['uuid'],
'privilegeName': '',
'groupName': target_groupname,
'privilegeType': 'access'
}
for privilege in privileges:
params['WsRestAssignGrouperPrivilegesLiteRequest']['privilegeName'] = privilege
result = requests.post(endpoint, json=params, headers=self.headers,
auth=self.grouper_auth)
metadata = result.json()['WsAssignGrouperPrivilegesLiteResult']['resultMetadata']
if metadata['resultCode'] not in ['SUCCESS_ALLOWED', 'SUCCESS_ALLOWED_ALREADY_EXISTED']:
raise ValueError(f"Unexpected result received: {metadata['resultCode']}")
return True
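# Illustrative sketch of the group-creation flow using the methods above
# (``ga`` is a Grouper instance; the group name "apitest" is hypothetical):
#
#   if not ga.check_group_exists("apitest", "test"):
#       ga.add_group("apitest", "test", "API test group")
#   ga.add_privilege(superadmins, "apitest", "test", "admin")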
def create_groups(groups: Union[str, List[str]],
group_type: str,
group_descriptions: Union[str, List[str]],
grouper_api: Grouper,
log0: Optional[Logger] = None,
add: bool = False) -> None:
"""
    Process a list of Grouper groups, adding any groups that do not exist
    and setting their permissions
:param groups: List containing group names
:param group_type: Grouper stem name. Either 'portal', 'quota', or 'test'
:param group_descriptions: Descriptions of group to include as metadata.
This shows up in the Grouper UI
:param grouper_api: ``Grouper`` object
:param log0: Logging object
:param add: Indicate whether to perform update or dry run.
Default: ``False``
:raises HTTPError: Grouper POST fails
"""
if isinstance(log0, type(None)):
log0 = log_stdout()
if isinstance(groups, str):
groups = [groups]
if isinstance(group_descriptions, str):
group_descriptions = [group_descriptions]
for group, description in zip(groups, group_descriptions):
add_dict = {'group': group,
'group_type': group_type,
'description': description}
# Check if group exists
try:
group_exists = grouper_api.check_group_exists(group, group_type)
except KeyError:
log0.info("Stem is empty")
group_exists = False
if not group_exists:
log0.info(f"Group does not exist : {group}")
if add:
log0.info(f'Adding {group} ...')
try:
add_result = grouper_api.add_group(**add_dict)
if add_result:
log0.info("SUCCESS")
except HTTPError:
                    raise
else:
log0.info('dry run, not performing group add')
else:
log0.info(f"Group exists : {group}")
if add:
log0.info(f'Adding admin privileges for groupersuperadmins ...')
try:
add_privilege = grouper_api.add_privilege(superadmins, group, group_type, 'admin')
if add_privilege:
log0.info("SUCCESS")
except HTTPError:
                raise
log0.info(f'Adding privileges for grouperadmins ...')
try:
add_privilege = grouper_api.add_privilege(admins, group, group_type,
['read', 'view', 'optout'])
if add_privilege:
log0.info("SUCCESS")
except HTTPError:
                raise
else:
log0.info('dry run, not performing privilege add')
def create_active_group(group: str,
grouper_dict: dict,
group_description: Optional[str] = None,
log: Optional[Logger] = None,
add: bool = False) -> None:
"""
Create a temporary group for figshare:active indirect membership
:param group: Name of group (e.g., "ual")
:param grouper_dict: Grouper configuration settings
:param group_description: Grouper description. Defaults will prompt for it
:param log: Logging object
:param add: Indicate adding group. Default: ``False`` (dry run)
"""
if isinstance(log, type(None)):
log = log_stdout()
# This is for figtest stem
ga_test = Grouper(**grouper_dict, grouper_production=False, log=log)
if isinstance(group_description, type(None)):
log.info("PROMPT: Provide description for group...")
group_description = input("PROMPT: ")
log.info(f"RESPONSE: {group_description}")
create_groups(group, 'group_active', group_description, ga_test,
log0=log, add=add)
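# Illustrative sketch of calling create_active_group; the grouper_dict keys
# mirror the Grouper constructor and the values below are hypothetical:
#
#   grouper_dict = dict(grouper_host="grouper.example.edu",
#                       grouper_base_path="grouper-ws/servicesRest/json/v2_2_001",
#                       grouper_user="ws_user", grouper_password="ws_password")
#   create_active_group("ual", grouper_dict,
#                       group_description="UAL active users", add=False)  # dry run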
def grouper_delta_user(group: str,
stem: str,
netid: Union[str, List[str]],
uaid: Union[str, List[str]],
action: str,
grouper_dict: Dict[str, Any],
| |
= map
def to_python(self, value):
return value
def to_url(self, value):
if isinstance(value, (bytes, bytearray)):
return _fast_url_quote(value)
return _fast_url_quote(str(value).encode(self.map.charset))
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
or equal 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = f"{{{int(length)}}}"
else:
if maxlength is None:
maxlength = ""
else:
maxlength = int(maxlength)
length = f"{{{int(minlength)},{maxlength}}}"
self.regex = f"[^/]{length}"
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = f"(?:{'|'.join([re.escape(x) for x in items])})"
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = "[^/].*?"
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False):
if signed:
self.regex = self.signed_regex
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
self.signed = signed
def to_python(self, value):
if self.fixed_digits and len(value) != self.fixed_digits:
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or (
self.max is not None and value > self.max
):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = str(value).zfill(self.fixed_digits)
return str(value)
@property
def signed_regex(self):
return f"-?{self.regex}"
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule("/page/<int:page>")
By default it only accepts unsigned, positive values. The ``signed``
parameter will enable signed, negative values. ::
Rule("/page/<int(signed=True):page>")
:param map: The :class:`Map`.
:param fixed_digits: The number of fixed digits in the URL. If you
set this to ``4`` for example, the rule will only match if the
URL looks like ``/0001/``. The default is variable length.
:param min: The minimal value.
:param max: The maximal value.
:param signed: Allow signed (negative) values.
.. versionadded:: 0.15
The ``signed`` parameter.
"""
regex = r"\d+"
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule("/probability/<float:probability>")
By default it only accepts unsigned, positive values. The ``signed``
parameter will enable signed, negative values. ::
Rule("/offset/<float(signed=True):offset>")
:param map: The :class:`Map`.
:param min: The minimal value.
:param max: The maximal value.
:param signed: Allow signed (negative) values.
.. versionadded:: 0.15
The ``signed`` parameter.
"""
regex = r"\d+\.\d+"
num_convert = float
def __init__(self, map, min=None, max=None, signed=False):
NumberConverter.__init__(self, map, min=min, max=max, signed=signed)
class UUIDConverter(BaseConverter):
"""This converter only accepts UUID strings::
Rule('/object/<uuid:identifier>')
.. versionadded:: 0.10
:param map: the :class:`Map`.
"""
regex = (
r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
)
def to_python(self, value):
return uuid.UUID(value)
def to_url(self, value):
return str(value)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
"default": UnicodeConverter,
"string": UnicodeConverter,
"any": AnyConverter,
"path": PathConverter,
"int": IntegerConverter,
"float": FloatConverter,
"uuid": UUIDConverter,
}
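# A minimal sketch of registering a custom converter on a Map; the
# ``ListConverter`` name and the ``Rule`` usage below are illustrative only:
#
#   class ListConverter(BaseConverter):
#       regex = r"[^/]+(?:,[^/]+)*"
#
#       def to_python(self, value):
#           return value.split(",")
#
#       def to_url(self, value):
#           return ",".join(value)
#
#   url_map = Map([Rule("/tags/<list:tags>", endpoint="tags")],
#                 converters={"list": ListConverter})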
class Map:
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: If a rule ends with a slash but the matched
URL does not, redirect to the URL with a trailing slash.
:param merge_slashes: Merge consecutive slashes when matching or
building URLs. Matches will redirect to the normalized URL.
Slashes in variable parts are not merged.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
.. versionchanged:: 1.0
If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
will match.
.. versionchanged:: 1.0
Added ``merge_slashes``.
.. versionchanged:: 0.7
Added ``encoding_errors`` and ``host_matching``.
.. versionchanged:: 0.5
Added ``sort_parameters`` and ``sort_key``.
"""
#: A dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
#: The type of lock to use when updating.
#:
#: .. versionadded:: 1.0
lock_class = Lock
def __init__(
self,
rules=None,
default_subdomain="",
charset="utf-8",
strict_slashes=True,
merge_slashes=True,
redirect_defaults=True,
converters=None,
sort_parameters=False,
sort_key=None,
encoding_errors="replace",
host_matching=False,
):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = self.lock_class()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.merge_slashes = merge_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(
self,
server_name,
script_name=None,
subdomain=None,
url_scheme="http",
default_method="GET",
path_info=None,
query_args=None,
):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
        not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionchanged:: 1.0
If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
will match.
.. versionchanged:: 0.15
``path_info`` defaults to ``'/'`` if ``None``.
.. versionchanged:: 0.8
``query_args`` can be a string.
.. versionchanged:: 0.7
Added ``query_args``.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError("host matching enabled and a subdomain was provided")
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = "/"
if path_info is None:
path_info = "/"
try:
server_name = _encode_idna(server_name)
except UnicodeError:
| |
# !/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import math
import socket
import sys
from confapp import conf as settings
from datetime import datetime as datetime_now
from pybpodapi.bpod.hardware.hardware import Hardware
from pybpodapi.bpod.hardware.channels import ChannelType
from pybpodapi.bpod.hardware.channels import ChannelName
from pybpodapi.bpod.hardware.events import EventName
from pybpodapi.bpod.hardware.output_channels import OutputChannel
from pybpodapi.bpod.emulator import Emulator
from pybpodapi.bpod_modules.bpod_modules import BpodModules
from pybpodapi.exceptions.bpod_error import BpodErrorException
from pybpodapi.com.messaging.end_trial import EndTrial
from pybpodapi.com.messaging.trial import Trial
from pybpodapi.com.messaging.event_occurrence import EventOccurrence
from pybpodapi.com.messaging.event_resume import EventResume
from pybpodapi.com.messaging.softcode_occurrence import SoftcodeOccurrence
from pybpodapi.com.messaging.session_info import SessionInfo
from pybpodapi.com.messaging.warning import WarningMessage
from pybpodapi.com.messaging.value import ValueMessage
from pybpodapi.com.messaging.state_transition import StateTransition
from pybpodapi.session import Session
from .non_blockingstreamreader import NonBlockingStreamReader
from .non_blockingsocketreceive import NonBlockingSocketReceive
logger = logging.getLogger(__name__)
class BpodBase(object):
"""
API to interact with Bpod
    :ivar Session session: Session for the experiment running on this Bpod
:ivar Hardware hardware: Hardware object representing Bpod hardware
:ivar MessageAPI message_api: Abstracts communication with Bpod box
:ivar bool new_sma_sent: whether a new state machine was already uploaded to Bpod box
"""
class Events(EventName):
pass
class OutputChannels(OutputChannel):
pass
class ChannelTypes(ChannelType):
pass
class ChannelNames(ChannelName):
pass
CHECK_STATE_MACHINE_COUNTER = 0
def __init__(self, serial_port=None, sync_channel=None, sync_mode=None, net_port=None, emulator_mode=False):
self._session = self.create_session()
self.serial_port = serial_port if serial_port is not None else settings.PYBPOD_SERIAL_PORT
self.baudrate = settings.PYBPOD_BAUDRATE
self.sync_channel = sync_channel if sync_channel is not None else settings.PYBPOD_SYNC_CHANNEL
self.sync_mode = sync_mode if sync_mode is not None else settings.PYBPOD_SYNC_MODE
self.net_port = net_port if net_port is not None else settings.PYBPOD_NET_PORT
self._hardware = Hardware() # type: Hardware
if emulator_mode:
self._emulator = Emulator(self._hardware)
self.__initialize_input_command_handler()
self.bpod_modules = self._emulator.bpod_modules
else:
self._emulator = None
self.bpod_modules = None # type: BpodModules
self.bpod_start_timestamp = None
self._new_sma_sent = False # type: bool
self._skip_all_trials = False
self._hardware.sync_channel = self.sync_channel # 255 = no sync, otherwise set to a hardware channel number
self._hardware.sync_mode = self.sync_mode # 0 = flip logic every trial, 1 = every state
self.session += SessionInfo(self.session.INFO_SERIAL_PORT, self.serial_port)
self.session += SessionInfo(self.session.INFO_PROTOCOL_NAME, settings.PYBPOD_PROTOCOL)
self.session += SessionInfo(self.session.INFO_CREATOR_NAME, settings.PYBPOD_CREATOR)
self.session += SessionInfo(self.session.INFO_PROJECT_NAME, settings.PYBPOD_PROJECT)
self.session += SessionInfo(self.session.INFO_EXPERIMENT_NAME, settings.PYBPOD_EXPERIMENT)
self.session += SessionInfo(self.session.INFO_BOARD_NAME, settings.PYBPOD_BOARD)
self.session += SessionInfo(self.session.INFO_SETUP_NAME, settings.PYBPOD_SETUP)
self.session += SessionInfo(self.session.INFO_BPODGUI_VERSION, settings.PYBPOD_BPODGUI_VERSION)
if self.net_port:
self.session += SessionInfo(self.session.INFO_NET_PORT, self.net_port)
for subject_name in settings.PYBPOD_SUBJECTS:
self.session += SessionInfo(self.session.INFO_SUBJECT_NAME, subject_name)
if hasattr(settings, 'PYBPOD_VARSNAMES'):
for varname in settings.PYBPOD_VARSNAMES:
self.session += ValueMessage(varname, getattr(settings, varname))
#########################################
############ PUBLIC METHODS #############
#########################################
def loop_handler(self):
"""
handler that will execute on every loop when the bpod is running
"""
pass
def open(self):
"""
Starts Bpod.
Connect to Bpod board through serial port, test handshake, retrieve firmware version,
retrieve hardware description, enable input ports and configure channel synchronization.
Example:
.. code-block:: python
my_bpod = Bpod().open("/dev/tty.usbmodem1293", "/Users/John/Desktop/bpod_workspace", "2afc_protocol")
:param str serial_port: serial port to connect
:param str workspace_path: path for bpod output files (no folders will be created)
:param str session_name: this name will be used for output files
:param int baudrate [optional]: baudrate for serial connection
:param int sync_channel [optional]: Serial synchronization channel: 255 = no sync, otherwise set to a hardware channel number
:param int sync_mode [optional]: Serial synchronization mode: 0 = flip logic every trial, 1 = every state
:return: Bpod object created
:rtype: pybpodapi.model.bpod
"""
logger.info("Starting Bpod")
self._bpodcom_connect(self.serial_port, self.baudrate)
if not self._bpodcom_handshake():
raise BpodErrorException('Error: Bpod failed to confirm connectivity. Please reset Bpod and try again.')
#########################################################
### check the firmware version ##############################
#########################################################
firmware_version, machine_type = self._bpodcom_firmware_version()
if firmware_version < int(settings.TARGET_BPOD_FIRMWARE_VERSION):
raise BpodErrorException('Error: Old firmware detected. Please update Bpod 0.7+ firmware and try again.')
if firmware_version > int(settings.TARGET_BPOD_FIRMWARE_VERSION):
raise BpodErrorException('Error: Future firmware detected. Please update the Bpod python software.')
self._hardware.firmware_version = firmware_version
self._hardware.machine_type = machine_type
#########################################################
self._bpodcom_hardware_description(self._hardware)
if not self._bpodcom_enable_ports(self._hardware):
raise BpodErrorException('Error: Failed to enable Bpod inputs.')
if not self._bpodcom_set_sync_channel_and_mode(sync_channel=self.sync_channel, sync_mode=self.sync_mode):
            raise BpodErrorException('Error: Failed to configure synchronization.')
# check if any module is connected
self.bpod_modules = self._bpodcom_get_modules_info(self._hardware)
self._hardware.setup(self.bpod_modules)
self.__initialize_input_command_handler()
return self
def close(self, ignore_emulator=False):
"""
Close connection with Bpod
"""
self.session += SessionInfo(self.session.INFO_SESSION_ENDED, datetime_now.now())
if hasattr(settings, 'PYBPOD_VARSNAMES'):
for varname in settings.PYBPOD_VARSNAMES:
self.session += ValueMessage(varname, getattr(settings, varname))
if self._emulator is None or ignore_emulator:
self._bpodcom_disconnect()
del self._session
if self.socketin is not None:
self.socketin.close()
self.sock.close()
if self.stdin is not None:
self.stdin.close()
def stop_trial(self):
self._bpodcom_stop_trial()
def pause(self):
self._bpodcom_pause_trial()
def resume(self):
self._bpodcom_resume_trial()
def refresh_modules(self):
# check if any module is connected
self.bpod_modules = self._bpodcom_get_modules_info(self._hardware)
self._hardware.setup(self.bpod_modules)
def register_value(self, name, value):
self._session += ValueMessage(name, value)
def send_state_machine(self, sma, run_asap=None, ignore_emulator=False):
"""
Builds message and sends state machine to Bpod
:param pybpodapi.model.state_machine sma: initialized state machine
"""
if self._emulator is None and not self.bpod_com_ready:
raise Exception('Bpod connection is closed')
if self._skip_all_trials is True:
return
logger.info("Sending state machine")
sma.update_state_numbers()
if self._emulator and not ignore_emulator:
self._emulator.set_state_machine(sma)
self._emulator.log_state_machine_info()
else:
state_machine_body = sma.build_message() + sma.build_message_global_timer() + sma.build_message_32_bits()
self._bpodcom_send_state_machine(sma.build_header(run_asap, len(state_machine_body)) + state_machine_body)
self._new_sma_sent = True
def run_state_machine(self, sma):
"""
Adds a new trial to current session and runs state machine on Bpod box.
While state machine is running, messages are processed accordingly.
When state machine stops, timestamps are updated and trial events are processed.
Finally, data is released for registered data consumers / exporters.
.. seealso::
Send command "run state machine": :meth:`pybpodapi.bpod.bpod_base.BpodBase.run_state_machine`.
Process opcode: :meth:`pybpodapi.bpod.bpod_base.BpodBase._BpodBase__process_opcode`.
Update timestamps: :meth:`pybpodapi.bpod.bpod_base.BpodBase._BpodBase__update_timestamps`.
:param (:class:`pybpodapi.state_machine.StateMachine`) sma: initialized state machine
"""
if self._emulator is None and not self.bpod_com_ready:
raise Exception('Bpod connection is closed')
if self._skip_all_trials is True:
return False
self.session += Trial(sma)
logger.info("Running state machine, trial %s", len(self.session.trials))
self.trial_timestamps = [] # Store the trial timestamps in case bpod is using live_timestamps
if self._emulator:
self._emulator.set_state_machine(sma)
self._emulator.initialize()
self._emulator.mirror_state(sma.current_state)
# TODO: Do the BpodSystem.RefreshGUI equivalent
self.trial_start_timestamp = self._emulator.matrix_start_time
else:
self._bpodcom_run_state_machine()
if self._new_sma_sent:
if self._bpodcom_state_machine_installation_status():
self._new_sma_sent = False
else:
raise BpodErrorException('Error: The last state machine sent was not acknowledged by the Bpod device.', self)
self.trial_start_timestamp = self._bpodcom_get_trial_timestamp_start()
if self.bpod_start_timestamp is None:
self.bpod_start_timestamp = self.trial_start_timestamp
#####################################################
# create a list of executed states
state_change_indexes = []
pause_task = False
# flags used to stop a trial (or all trials)
interrupt_task = False
kill_task = False
sma.is_running = True
while sma.is_running:
# read commands from the stdin ######################
if self.stdin is not None:
inline = self.stdin.readline()
if inline is not None:
pause_task, interrupt_task, kill_task = self.handle_inline(inline, sma)
#####################################################
# read commands from a net socket ###################
if self.socketin is not None:
inline = self.socketin.readline()
if inline is not None:
inline = inline.decode().strip()
pause_task, interrupt_task, kill_task = self.handle_inline(inline, sma)
#####################################################
if pause_task:
continue
opcode, data = None, None
if self._emulator:
opcode, data = self._emulator.run()
elif self.data_available():
opcode, data = self._bpodcom_read_opcode_message()
if opcode is not None and data is not None and \
not self.__process_opcode(
sma, opcode, data, state_change_indexes):
break
self.loop_handler()
if interrupt_task or kill_task:
self._skip_all_trials = True
break
if self._emulator:
self._emulator.mirror_state(None)
self.session += EndTrial('The trial ended')
if not interrupt_task:
self.__update_timestamps(sma, state_change_indexes)
self.session.add_trial_events()
logger.info("Publishing Bpod trial")
if interrupt_task and kill_task:
self.close()
exit(0)
return not interrupt_task
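    # Typical protocol flow built on the methods above (a sketch; constructing
    # ``sma`` with a StateMachine and the serial-port value are assumptions):
    #
    #   bpod = Bpod(serial_port="/dev/ttyACM0")
    #   bpod.open()
    #   bpod.send_state_machine(sma)
    #   bpod.run_state_machine(sma)   # returns False if the trial was interrupted
    #   bpod.close()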
def handle_inline(self, inline, sma):
pause_task = False
interrupt_task = False
kill_task = False
if inline.startswith('pause-trial'):
if self._emulator:
pause_task = True
else:
self.pause()
elif inline.startswith('resume-trial'):
if self._emulator:
pause_task = False
else:
self.resume()
elif inline.startswith('stop-trial'):
if self._emulator:
interrupt_task = True
else:
self.stop_trial()
elif inline.startswith('close'):
if self._emulator is None:
self.stop_trial()
interrupt_task = True
elif inline.startswith('kill'):
if self._emulator is None:
self.stop_trial()
interrupt_task = kill_task = True
elif inline.startswith('SoftCode'):
softcode = int(inline[-1]) - 1
self.trigger_softcode(softcode)
elif inline.startswith('trigger_input:'):
tdata = inline.split(':')
chn_name = tdata[1]
evt_data = tdata[2]
# TODO: surround this call in a try except to capture calls with unavailable channel names
channel_number = sma.hardware.channels.input_channel_names.index(chn_name)
self.trigger_input(channel_number, evt_data)
elif inline.startswith('trigger_output:'):
tdata = inline.split(':')
chn_name = tdata[1]
evt_data = tdata[2]
# TODO: surround this call in a try except to capture calls with unavailable channel names
channel_number = sma.hardware.channels.output_channel_names.index(chn_name)
self.trigger_output(channel_number, evt_data)
elif inline.startswith('message:'):
tdata = inline.split(':')
module_index = int(tdata[1])
msg = tdata[2]
final_msg = []
msg_elems = msg.split()
if msg_elems[0].startswith('\''):
final_msg.append(ord(msg_elems[0][1]))
for x in msg_elems[1:]:
final_msg.append(int(x))
self.load_message(module_index, final_msg)
return pause_task, interrupt_task, kill_task
def load_serial_message(self, serial_channel, message_ID, serial_message):
"""
Load serial message on Bpod
:param int serial_channel: Serial port to send, 1, 2 or 3
:param int message_ID: Unique id for the message. Should be between 1 and 255
:param list(int) serial_message: Message to send. The message should be bigger than 3 bytes.
"""
response = self._bpodcom_load_serial_message(serial_channel, message_ID, serial_message, 1)
if not response:
raise BpodErrorException('Error: Failed to set serial message.')
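    # Example sketch: store a three-byte payload under message ID 25 on module
    # serial port 1 (all values here are illustrative):
    #
    #   bpod.load_serial_message(1, 25, [65, 66, 67])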
def reset_serial_messages(self):
"""
Reset serial messages to equivalent byte codes (i.e. message# 4 | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Simple BPG Image viewer.
Copyright (c) 2014-2018, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from sys import argv,exit,version_info
from os import listdir,access,R_OK,stat,close,remove
from os.path import exists,isfile,isdir,dirname,basename,realpath,join,abspath
from tempfile import mkstemp
from shutil import copyfile
from subprocess import Popen,PIPE,STDOUT
from math import floor
from struct import unpack
from platform import system
from threading import Thread,Lock
import locale
if system()=="Windows":
osflag=False
from subprocess import STARTUPINFO
else:
osflag=True
from os import mkfifo,O_RDONLY,O_NONBLOCK
from os import open as osopen
from os import read as osread
from os import close as osclose
import errno
wxapp=False
class translator():
def __init__(self):
self.voc={}
self.locale=locale.getdefaultlocale()
def find(self,key):
try: wxu=True if (wx.VERSION[0]>3 or osflag) else False
except: wxu=False
if key in self.voc:
if self.locale[0] in self.voc[key]:
if wxu: return self.voc[key][self.locale[0]]
else:
return self.voc[key][self.locale[0]].encode(\
self.locale[1])
return key
t=translator()
def add(tr,key,language,translation):
if key in tr.voc:
tr.voc[key][language]=translation
else:
tr.voc[key]={language:translation}
def load(tr,data):
if type(data)==tuple and len(data):
for l in data:
if type(l)==tuple and len(l)==3:
add(tr,l[0],l[1],l[2])
else:
return
load(t,(\
("Please install","ru_RU","Пожалуйста, установите"),\
("or higher","ru_RU","или новее"),\
("Under Debian or Ubuntu you may try","ru_RU",
"В Debian или Ubuntu Вы можете попробовать следующую команду"),\
("BPG decoder not found!\n","ru_RU","BPG декодер не найден!\n"),\
("BPG decoding error!\n","ru_RU","Ошибка при декодировании файла!\n"),\
("Unable to open ","ru_RU","Невозможно открыть файл "),\
("File","ru_RU","Файл"),("is not a BPG-File!",
"ru_RU","не является файлом в формате BPG!"),\
("Press Ctrl+O to open BPG file...","ru_RU",
"Нажмите Ctrl+O, чтобы открыть файл BPG..."),\
("Unable to create FIFO file!","ru_RU","Невозможно создать файл FIFO!"),\
("Loading...","ru_RU","Загрузка..."),\
("Rotating...","ru_RU","Поворот..."),\
("This is BPG image file viewer. Hot keys:\n","ru_RU",
"Просмотр изображений в формате BPG. Клавиатурные сочетания:\n"),\
("Esc - close\n","ru_RU","Esc - выход\n"),\
("Ctrl+O - open BPG image file\n","ru_RU","Ctrl+O - открыть файл\n"),\
("Ctrl+S - save a copy of the opened file as a PNG file\n","ru_RU",
"Ctrl+S - сохранить копию изображения в формате PNG\n"),\
("Ctrl+C - save a copy of the opened file\n","ru_RU",
"Ctrl+C - сохранить копию исходного файла\n"),\
("Ctrl+R - rotate 90 degrees clockwise\n","ru_RU",
"Ctrl+R - поворот на 90 градусов по часовой стрелке\n"),\
("Ctrl+L - rotate 90 degrees counterclockwise\n","ru_RU",
"Ctrl+L - поворот на 90 градусов против часовой стрелки\n"),\
("Ctrl+F - toggle full screen mode\n","ru_RU",
"Ctrl+F - включить/выключить полноэкранный режим\n"),\
("Ctrl+T - toggle 'stay on top' mode\n","ru_RU",
"Ctrl+T - включить/выключить режим 'поверх остальных'\n"),\
("Ctrl+Left,Home - jump to the first image in folder\n","ru_RU",
"Ctrl+Left,Home - перейти к первому изображению в папке\n"),\
("Ctrl+Right,End - jump to the last image in folder\n","ru_RU",
"Ctrl+Right,End - перейти к последнему изображению в папке\n"),\
("+ - zoom in (up to 100%)\n","ru_RU","+ - увеличить (не более чем до 100%)\n"),\
("- - zoom out (down to the smallest available size)\n","ru_RU",
"- - уменьшить (до минимального доступного размера)\n"),\
("* - zoom out to fit window area\n","ru_RU",
"* - уменьшить до размеров по умолчанию\n"),\
("Left,Up,Right,Down - move over the scaled image\n","ru_RU",
"Left,Up,Right,Down - перемещение увеличенного изображения в окне просмотра\n"),\
("PgUp,Backspace,A,S - view previous file\n","ru_RU",
"PgUp,Backspace,A,S - перейти к предыдущему файлу в директории\n"),\
("PgDown,Return,D,W - view next file\n","ru_RU",
"PgDown,Return,D,W - перейти к следующему файлу в директории\n"),\
("Delete - delete current file\n","ru_RU",
"Delete - удалить текущий файл\n"),\
("Help","ru_RU","Помощь"),\
("Delete file","ru_RU","Удалить файл"),\
("File deletion!","ru_RU","Удаление файла"),\
("Unable to delete:","ru_RU","Невозможно удалить:"),\
("Open BPG file","ru_RU","Открыть файл BPG"),\
("BPG files","ru_RU","Файлы BPG"),\
("Save BPG file as PNG file","ru_RU",
"Сохранить копию изображения в формате PNG"),\
("PNG files","ru_RU","Файлы PNG"),\
("Saving PNG file...","ru_RU","Сохранение копии файла (PNG)..."),\
("Unable to save","ru_RU","Невозможно сохранить файл"),\
("Save a copy...","ru_RU","Сохранение копии файла..."),\
("Zooming in...","ru_RU","Увеличение..."),\
("Zooming out...","ru_RU","Уменьшение..."),
("Error!","ru_RU","Ошибка!")))
def _(s):
return t.find(s)
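# Example: with an ru_RU locale, _("Help") returns the translation loaded above
# ("Помощь"); for any locale without an entry the key itself is returned.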
def __(s,codepage):
if version_info[0]<3:
if type(s) is unicode: s=s.encode(codepage)
return s
def errmsg(msg):
if osflag:
try:
f=Popen(['notify-send',msg],False,stdin=None,stdout=None,\
stderr=None)
f.wait()
except:
try:
f=Popen(['xmessage',msg],False,stdin=None,stdout=None,\
stderr=None)
f.wait()
except: pass
else:
import ctypes
MessageBox=ctypes.windll.user32.MessageBoxA
MessageBox(0,msg,_('Error!'),16)
if not(osflag):
try: import win32file,win32pipe
except:
msg=_("Please install")+" Python for Windows Extensions\n\
(http://sourceforge.net/projects/pywin32/)!"
errmsg(msg)
raise RuntimeError(msg)
try: import wx
except:
msg=_("Please install")+" wxPython 2.8 ("+_("or higher")+\
") (http://www.wxpython.org/)!\n"+\
_("Under Debian or Ubuntu you may try")+":\n"\
"sudo aptitude install python-wxgtk2.8\n"+_("or")+"\n"+\
"sudo aptitude install python-wxgtk3.0"
errmsg(msg)
raise RuntimeError(msg)
try:
from PIL import Image
from PIL.Image import core as _imaging
except:
msg=_("Please install")+" Python Imaging Library (PIL) 1.1.7 ("+\
_("or higher")+") (http://www.pythonware.com/products/pil/)\n"+\
_("or")+" Pillow 3.2.0 ("+_("or higher")+\
") (https://pillow.readthedocs.org/en/3.2.x/)!\n"+\
_("Under Debian or Ubuntu you may try")+":\n"\
"sudo aptitude install python-imaging\n"+_("or")+"\n"+\
"sudo aptitude install python-pil"
errmsg(msg)
raise RuntimeError(msg)
from wx.lib.embeddedimage import PyEmbeddedImage
bpglogo=PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAABHNCSVQICAgIfAhkiAAAAAlw"
"SFlzAAABBgAAAQYBzdMzvAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA"
"<KEY>"
"<KEY>"
"cGJ843sdm+ZP1++bf+d7zvn+7u+c3/n5niuklOTCmTNnisbHRTvIp4DtwMNAFVCcs2PhiANB"
"4BtgCMRZt1t6Ozs7Z3N1EmYOvP/++WKLZfpFkG8CFcssNl9MgujRNPvx7u6OuBHB0IHe3n+1"
"Sqn8E6hfaYV5YlSI5O+7uvYNLm5QFht6e/uelVLx8sMRD1AvpeLt7e17dnGDLgIp8eJUvqPa"
"bFZsNuv3UpZIzJFIzOXNF0I+19W193T694ID88vGS47N6XCU4XJV4HCU4XCso7T0oQdXnoGZ"
"mRjh8D3C4Snu3JkkHJ7KRY8LkWxfWE5CSjm/YaeGMVk2FouCx1NPQ0P1sgheCiMjAYaHR9G0"
"pBllVNPKPN3dHXElJXD6RUzEO53ltLc3r5p4gIaGatrbm3E6y80o9fOaEX19fUXj44QwSJVO"
"Zzk7d/4cIVZQbQ5ICQMDnzMxETFqnnS7qVRSh1S2eItFoalp85qJBxACmpo2Y7FkJUuAivFx"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"gv37f2s4x9xcgqNH/5aXnoIciMfjfPttCKfTSUlJybxtlvHx/6Jp95+Mz+czHWNuTgPg8uVL"
"phyLJX9ZBTng96tIKfnwwxO0trYBIKWktbVZN6nPN8bmzVs4d64/bTt58u+89dYRrFYLAIOD"
"g7S17eTo0WO6OQ4ePIDNVrQyDoyNjQGwaVNd2iaEIBaLUVNTo3OgtraW4uL7Ca601I6iKFRX"
"p3g3blznySd3U1OzUTfHtm3b2b//dyvjgM83RlFREZWVlWlbOBzm7t27bNmyNW1TVR+PPdau"
"63v7tp/KykpsNhuzs7OEQiHq6uqy5nj33b8WIqkwB1TVh91u5/Tpk2nb1atXKCtbR0tLCwDJ"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"LI6UEr9fpbHxp/lKAgpYQj6fDyEEzzzza50QTdPSp3KKN8aePR05x4pGo/T2HufEiY909lgs"
"RktLm0kvYxTgwBhSSr788obOvnWrh/r6BgC++y5CJBJZMgLxeJxkMkksFtPZXS4XjY2N+UoC"
"HmAJZaKpaRs9Pe9lcFIpNHNJGeH55/+AWPS6Y/36Dbz99nsmPcwhPvigT5e3bDYrHR07Cx5o"
"NXD+/EDW/4GsCCQSc8zMxBab1xwzMzHDl8AKqZsRHcLhe6uhqSCYaIorpK51FpFzvh1eE5ho"
"Ciqk7qR0uHNncsUFFQoTTd8owNBiazg8xchIwIC/NhgZCZhFYEgBcdaoZXh4lOnpqFHTqmJ6"
"Osrw8KhJqziruN3SC2TFR9OSXLt2kwKLw2WFlHDt2k2zi45Jt1t6ldQ9rOgxYkxMRBgY+HxN"
"IjE9Hc11NwCIns7OzlkFQNPsxwHDOE1MRPB6r67qnhgZCeD1Xs0hntF5zT+SS74F/F9fsy5g"
"3okTLP+3EN8XcSHkwUzx8GP81ACgq2vfoKaVeUC8gkGKXUVMgnhF08o8RuIhx9cqC/ihf27z"
"P7EZ5A4mdx+jAAAAAElFTkSuQmCC")
def errmsgbox(msg):
if not(wxapp): app=wx.App(0)
print(msg)
wx.MessageBox(msg,_('Error!'),wx.OK|wx.ICON_ERROR)
if not(wxapp): app.Exit()
def bpggetcmd():
binname='bpgdec'
if not osflag: binname+='.exe'
bpgpath=join(dirname(realpath(argv[0])),binname)
if not(isfile(bpgpath)):
msg=_('BPG decoder not found!\n')
errmsgbox(msg)
exit()
return bpgpath
class GenBitmap(wx.Panel):
def __init__(self,parent,ID,bitmap,pos=wx.DefaultPosition,
size=wx.DefaultSize,style=0):
if not style & wx.BORDER_MASK: style=style|wx.BORDER_NONE
wx.Panel.__init__(self,parent,ID,pos,size,style)
self._bitmap=bitmap
self._clear=False
self.SetInitialSize(size)
self.Bind(wx.EVT_ERASE_BACKGROUND,lambda e: None)
self.Bind(wx.EVT_PAINT,self.OnPaint)
def SetBitmap(self,bitmap):
self._bitmap=bitmap
self.SetInitialSize((bitmap.GetWidth(),bitmap.GetHeight()))
self.Refresh()
def GetBitmap(self): return self._bitmap
def OnPaint(self,event):
dc=wx.PaintDC(self)
if self._clear: dc.Clear()
if self._bitmap:
dc.DrawBitmap(self._bitmap,0,0,True)
self._clear=False
class DecodeThread(Thread):
def __init__(self,parent,func):
Thread.__init__(self)
self.parent=parent
self.func=func
def run(self):
if self.parent.dlock.acquire(False):
self.func()
self.parent.dlock.release()
SE_EVT_TYPE=wx.NewEventType()
SE_EVT_BNDR=wx.PyEventBinder(SE_EVT_TYPE,1)
class ShowEvent(wx.PyCommandEvent):
def __init__(self,etype,eid,value=None):
wx.PyCommandEvent.__init__(self,etype,eid)
self.value=value
class FileDropTarget(wx.FileDropTarget):
def __init__(self,obj):
wx.FileDropTarget.__init__(self)
self.obj=obj
def OnDropFiles(self,x,y,filenames):
self.obj.showempty()
self.obj.index=0
self.obj.filelist=[]
self.obj.showimage(self.obj.checkpath(filenames[0]))
return True
class DFrame(wx.Frame):
def bpgdecode(self,filename):
msg=None
cmd=self.bpgpath
self.frames_index=0
if len(self.frames): self.frames=[]
if self.img:
del self.img
self.img=None
if len(filename)>4 and filename[-4:].lower()=='.bpg':
try:
if not(isfile(filename) and access(filename,R_OK)):
                    msg=_('Unable to open ')+'\"%s\"!'%filename
except: return False
if not(msg):
err=0
try:
imbuffer=''
if osflag:
fifo=osopen(self.fifo,O_RDONLY|O_NONBLOCK)
cmd+=' "'+realpath(filename)+'" '+self.fifo+\
' >/dev/null 2>&1'
f=Popen(cmd,shell=True,stdin=None,stdout=None,\
stderr=None)
if fifo:
while True:
if f.poll()!=None: break;
try: data=osread(fifo,16777216)
except OSError as e:
if e.errno==errno.EAGAIN or\
e.errno==errno.EWOULDBLOCK: data=''
else: raise
if len(data): imbuffer+=data
osclose(fifo)
else:
si=STARTUPINFO()
si.dwFlags|=1
si.wShowWindow=0
pname='\\\\.\\pipe\\'+basename(self.fifo)
tpipe=win32pipe.CreateNamedPipe(
pname,
win32pipe.PIPE_ACCESS_DUPLEX,
win32pipe.PIPE_TYPE_BYTE|win32pipe.PIPE_WAIT,
1,16777216,16777216,2000,None)
cmd+=' "'+realpath(filename)+'" '+pname
f=Popen(cmd,shell=False,stdin=None,stdout=None,\
stderr=None,bufsize=0,startupinfo=si)
win32pipe.ConnectNamedPipe(tpipe,None)
imbuffer=''
if version_info[0]<3: imbuffer=''
else: imbuffer=b''
if tpipe:
while True:
data=None
try: data=win32file.ReadFile(tpipe,16777216)
except: data=None
if not(data): break
if data[0]!=0: break
if len(data[1]): imbuffer+=data[1]
win32pipe.DisconnectNamedPipe(tpipe)
f.wait()
if len(imbuffer):
x,=unpack("i",imbuffer[0:4])
y,=unpack("i",imbuffer[4:8])
n,=unpack("i",imbuffer[8:12])
d,=unpack("i",imbuffer[12:16])
if n==0 and d==1:
try:
self.img=Image.frombytes('RGBA',(x,y),
imbuffer[16:])
except: err=1
else:
self.scale=100.0
self.autoscale=self.scale
self.bitmap_text=str(x)+'x'+str(y)
self.imginfo='%.2f'%self.scale+'%@'+self.bitmap_text
ishift=8
dr=n*1000/d
while True:
try:
n,=unpack("i",imbuffer[ishift:ishift+4])
ishift+=4
d,=unpack("i",imbuffer[ishift:ishift+4])
ishift+=4
except: break
try:
img=Image.frombytes('RGBA',(x,y),
imbuffer[ishift:])
except: break
ishift+=(x*y*4)
self.frames.append([self.bitmapfrompil(img),n*1000/d])
else: err=1
del imbuffer
else: err=1
except: err=1
if err: msg=_('BPG decoding error!\n')
else: msg=_('File')+' \"%s\" '%filename+_('is not a BPG-File!')
if msg:
wx.PostEvent(self,ShowEvent(SE_EVT_TYPE,-1,value=msg))
if self.img:
del self.img
self.img=None
else: return True
return False
def stitle(self,title):
self.Title=title
if osflag: self.Update()
else: self.Refresh()
def deftitle(self):
self.stitle(_('Press Ctrl+O to open BPG file...'))
def getcsize(self):
cr=wx.Display().GetClientArea()
cw=self.GetSize()
cc=self.GetClientSize()
return cr[2]-cr[0]-cw[0]+cc[0],cr[3]-cr[1]-cw[1]+cc[1]
def bitmapfrompil(self,img):
if wx.VERSION[0]<4:
return wx.BitmapFromBufferRGBA(img.size[0],\
img.size[1],img.convert("RGBA").tobytes())
else:
return wx.Bitmap.FromBufferRGBA(img.size[0],\
img.size[1],img.convert("RGBA").tobytes())
def scaleframe(self,img,width,height):
if img:
return self.bitmapfrompil(img.resize((int(width),\
int(height)),Image.ANTIALIAS))
else: return None
def scalebitmap(self,width,height):
if self.img:
return self.scaleframe(self.img,width,height)
else: return None
def showsingleframe(self,bitmap):
self.bitmap.SetBitmap(bitmap)
if wx.VERSION[0]<4: self.bitmap.SetToolTipString(self.imginfo)
else: self.bitmap.SetToolTip(self.imginfo)
x,y=bitmap.GetSize()
self.panel.SetVirtualSize((x,y))
self.panel.SetScrollbars(1,1,x,y)
self.panel.SetScrollRate(1,1)
cx,cy=self.getcsize()
if not(self.IsMaximized()) and not(self.IsFullScreen()) and\
x<=cx and y<=cy:
self.panel.SetInitialSize(size=(x,y))
self.panel.SetClientSize((x,y))
self.Layout()
self.Fit()
else: self.Layout()
def shownextframe(self,event):
if len(self.frames):
self.frames_index+=1
if self.frames_index==len(self.frames): self.frames_index=0
self.showsingleframe(self.frames[self.frames_index][0])
self.frame_timer.Start(self.frames[self.frames_index][1],
wx.TIMER_ONE_SHOT)
def showframes(self):
self.bitmap._clear=True
if len(self.frames)==0: self.showempty()
else:
bitmap=self.frames[0][0]
self.showsingleframe(bitmap)
self.frame_timer.Start(self.frames[self.frames_index][1],
wx.TIMER_ONE_SHOT)
def showbitmap(self,bitmap):
self.bitmap._clear=True
if bitmap is None: self.showempty()
else:
self.imginfo='%.2f'%self.scale+'%@'+self.bitmap_text
self.showsingleframe(bitmap)
def emptybitmap(self):
if wx.VERSION[0]<4: buffer=wx.EmptyBitmap(400,300)
else: buffer=wx.Bitmap(400,300)
dc=wx.BufferedDC(None,buffer)
dc.SetBackground(wx.Brush(self.panel.GetBackgroundColour()))
dc.Clear()
dc.Destroy()
return buffer
def showempty(self):
if len(self.frames): self.frames=[]
if self.img:
try: del self.img
except: pass
self.img=None
self.showbitmap(self.emptybitmap())
self.imginfo=''
def autoscaled(self):
if self.img:
if self.IsFullScreen(): cx,cy=wx.DisplaySize()
else:
if self.IsMaximized() or self.max: cx,cy=self.GetClientSize()
else: cx,cy=self.getcsize()
d=0.0
x=self.img.size[0]
y=self.img.size[1]
self.bitmap_text=str(x)+'x'+str(y)
d0=float(cx)/float(x)
d1=float(cy)/float(y)
if d0<1.0 or d1<1.0:
"""Base class git action manager (subclasses will accommodate each type)"""
from peyotl.utility.str_util import is_str_type
from peyotl.nexson_syntax import write_as_json
from peyotl.utility import get_logger
import os
# noinspection PyUnresolvedReferences
from sh import git # pylint: disable=E0611
import shutil
import sh
import locket
import codecs
import tempfile # @TEMPORARY for deprecated write_study
_LOG = get_logger(__name__)
def get_HEAD_SHA1(git_dir):
"""Not locked!
"""
head_file = os.path.join(git_dir, 'HEAD')
with open(head_file, 'r') as hf:
head_contents = hf.read().strip()
assert head_contents.startswith('ref: ')
ref_filename = head_contents[5:] # strip off "ref: "
real_ref = os.path.join(git_dir, ref_filename)
with open(real_ref, 'r') as rf:
return rf.read().strip()
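# Illustrative sketch (path and SHA are hypothetical): if <git_dir>/HEAD contains
# "ref: refs/heads/master", the function reads <git_dir>/refs/heads/master and
# returns its contents, e.g.
#   get_HEAD_SHA1('/repos/phylesystem-1/.git')
#   # -> 'd670460b4b4aece5915caf5c68d12f560a9fe3e4'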
def get_user_author(auth_info):
"""Return commit author info from a dict. Returns username and author string.
auth_info should have 3 strings:
`login` (a github log in)
`name`
`email`
username will be in the `login` value. It is used for WIP branch naming.
the author string will be the name and email joined. This is used in commit messages.
"""
return auth_info['login'], ("%s <%s>" % (auth_info['name'], auth_info['email']))
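# Hedged usage sketch (the values are made up):
#   auth_info = {'login': 'jdoe', 'name': 'Jane Doe', 'email': 'jdoe@example.org'}
#   get_user_author(auth_info)  # -> ('jdoe', 'Jane Doe <jdoe@example.org>')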
class MergeException(Exception):
pass
class GitWorkflowError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class RepoLock(object):
def __init__(self, lock):
self._lock = lock
def __enter__(self):
self._lock.acquire()
def __exit__(self, _type, value, traceb):
self._lock.release()
class GitActionBase(object):
@staticmethod
def clone_repo(par_dir, repo_local_name, remote):
if not os.path.isdir(par_dir):
raise ValueError(repr(par_dir) + ' is not a directory')
if not is_str_type(remote):
raise ValueError(repr(remote) + ' is not a remote string')
dest = os.path.join(par_dir, repo_local_name)
if os.path.exists(dest):
raise RuntimeError('Filepath "{}" is in the way'.format(dest))
git('clone', remote, repo_local_name, _cwd=par_dir)
@staticmethod
def add_remote(repo_dir, remote_name, remote_url):
git_dir_arg = "--git-dir={}/.git".format(repo_dir)
git(git_dir_arg, 'remote', 'add', remote_name, remote_url)
def __init__(self,
doc_type,
repo,
remote=None,
git_ssh=None,
pkey=None,
cache=None, # pylint: disable=W0613
path_for_doc_fn=None,
max_file_size=None,
path_for_doc_id_fn=None):
self.repo = repo
self.doc_type = doc_type
self.git_dir = os.path.join(repo, '.git')
self._lock_file = os.path.join(self.git_dir, "API_WRITE_LOCK")
self._lock_timeout = 30 # in seconds
self._lock = locket.lock_file(self._lock_file, timeout=self._lock_timeout)
self.repo_remote = remote
self.git_ssh = git_ssh
self.pkey = pkey
self.max_file_size = max_file_size
self.path_for_doc_fn = path_for_doc_fn
self.path_for_doc_id_fn = path_for_doc_id_fn
if os.path.isdir("{}/.git".format(self.repo)):
self.gitdir = "--git-dir={}/.git".format(self.repo)
self.gitwd = "--work-tree={}".format(self.repo)
else: # EJM needs a test?
raise ValueError('Repo "{repo}" is not a git repo'.format(repo=self.repo))
# some methods are required, but particular to each subclass
def find_WIP_branches(self, some_id): # pylint: disable=W0613
raise NotImplementedError("Subclass must implement find_WIP_branches!")
def create_or_checkout_branch(self,
gh_user,
some_id,
parent_sha,
force_branch_name=False): # pylint: disable=W0613
raise NotImplementedError("Subclass must implement create_or_checkout_branch!")
def env(self): # @TEMP could be ref to a const singleton.
d = dict(os.environ)
if self.git_ssh:
d['GIT_SSH'] = self.git_ssh
if self.pkey:
d['PKEY'] = self.pkey
return d
def acquire_lock(self):
"""Acquire a lock on the git repository"""
_LOG.debug('Acquiring lock')
self._lock.acquire()
def release_lock(self):
"""Release a lock on the git repository"""
_LOG.debug('Releasing lock')
try:
self._lock.release()
except:
_LOG.debug('Exception releasing lock suppressed.')
def path_for_doc(self, doc_id):
"""Returns doc_dir and doc_filepath for doc_id.
"""
full_path = self.path_for_doc_fn(self.repo, doc_id)
# _LOG.debug('>>>>>>>>>> GitActionBase.path_for_doc_fn: {}'.format(self.path_for_doc_fn))
# _LOG.debug('>>>>>>>>>> GitActionBase.path_for_doc returning: [{}]'.format(full_path))
return full_path
def lock(self):
""" for syntax:
with git_action.lock():
git_action.checkout()
"""
return RepoLock(self._lock)
def get_branch_list(self):
x = git(self.gitdir, self.gitwd, "branch", "--no-color")
b = []
for line in x.split('\n'):
if line.startswith('*'):
line = line[1:]
ls = line.strip()
if ls:
b.append(ls)
return b
def current_branch(self):
"""Return the current branch name"""
branch_name = git(self.gitdir, self.gitwd, "symbolic-ref", "HEAD")
return branch_name.replace('refs/heads/', '').strip()
def branch_exists(self, branch):
"""Returns true or false depending on if a branch exists"""
try:
git(self.gitdir, self.gitwd, "rev-parse", branch)
except sh.ErrorReturnCode:
return False
return True
def delete_branch(self, branch):
git(self.gitdir, self.gitwd, 'branch', '-d', branch)
def _find_head_sha(self, frag, parent_sha):
head_shas = git(self.gitdir, self.gitwd, "show-ref", "--heads")
for lin in head_shas.split('\n'):
# _LOG.debug("lin = '{l}'".format(l=lin))
if lin.startswith(parent_sha):
local_branch_split = lin.split(' refs/heads/')
# _LOG.debug("local_branch_split = '{l}'".format(l=local_branch_split))
if len(local_branch_split) == 2:
branch = local_branch_split[1].rstrip()
if branch.startswith(frag):
return branch
return None
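# Illustrative note (SHA and branch name are hypothetical): `git show-ref --heads`
# emits lines such as
#   d670460b4b4aece5915caf5c68d12f560a9fe3e4 refs/heads/jdoe_doc_xy_10_0
# so a line whose SHA starts with `parent_sha` and whose branch name starts with
# `frag` identifies the WIP branch returned here.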
def checkout(self, branch):
git(self.gitdir, self.gitwd, "checkout", branch)
def checkout_master(self):
git(self.gitdir, self.gitwd, "checkout", "master")
def fetch(self, remote='origin'):
"""fetch from a remote"""
git(self.gitdir, "fetch", remote, _env=self.env())
def push(self, branch, remote):
git(self.gitdir, 'push', remote, branch, _env=self.env())
def reset_hard(self):
try:
git(self.gitdir, self.gitwd, 'reset', '--hard')
except:
_LOG.exception('"git reset --hard" failed.')
def get_master_sha(self):
x = git(self.gitdir, self.gitwd, "show-ref", "master", "--heads", "--hash")
return x.strip()
def get_blob_sha_for_file(self, filepath, branch='HEAD'):
try:
r = git(self.gitdir, self.gitwd, 'ls-tree', branch, filepath)
# _LOG.debug('ls-tree said "{}"'.format(r))
line = r.strip()
ls = line.split()
# _LOG.debug('ls is "{}"'.format(str(ls)))
assert len(ls) == 4
assert ls[1] == 'blob'
return ls[2]
except:
_LOG.exception('git ls-tree failed')
raise
def get_version_history_for_file(self, filepath):
""" Return a dict representation of this file's commit history
This uses specially formatted git-log output for easy parsing, as described here:
http://blog.lost-theory.org/post/how-to-parse-git-log-output/
For a full list of available fields, see:
http://linux.die.net/man/1/git-log
"""
# define the desired fields for logout output, matching the order in these lists!
GIT_COMMIT_FIELDS = ['id',
'author_name',
'author_email',
'date',
'date_ISO_8601',
'relative_date',
'message_subject',
'message_body']
GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%aD', '%ai', '%ar', '%s', '%b']
# make the final format string, using standard ASCII field/record delimiters
GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'
try:
log = git(self.gitdir,
self.gitwd,
'--no-pager',
'log',
'--format=%s' % GIT_LOG_FORMAT,
'--follow', # Track file's history when moved/renamed...
'--find-renames=100%', # ... but only if the contents are identical!
'--',
filepath)
# _LOG.debug('log said "{}"'.format(log))
log = log.strip('\n\x1e').split("\x1e")
log = [row.strip().split("\x1f") for row in log]
log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log]
except:
_LOG.exception('git log failed')
raise
return log
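# Illustrative sketch of the return value (field contents are hypothetical):
#   [{'id': 'd670460b...', 'author_name': 'Jane Doe',
#     'author_email': 'jdoe@example.org', 'date': 'Tue, 1 Jan 2019 12:00:00 +0000',
#     'date_ISO_8601': '2019-01-01 12:00:00 +0000', 'relative_date': '2 years ago',
#     'message_subject': 'update study', 'message_body': ''}, ...]
# i.e. one dict per commit that touched the file, keyed by GIT_COMMIT_FIELDS.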
def _add_and_commit(self, doc_filepath, author, commit_msg):
"""Low level function used internally when you have an absolute filepath to add and commit"""
try:
git(self.gitdir, self.gitwd, "add", doc_filepath)
git(self.gitdir, self.gitwd, "commit", author=author, message=commit_msg)
except Exception as e:
# We can ignore this if no changes are new,
# otherwise raise a 400
if "nothing to commit" in e.message: # @EJM is this dangerous?
_LOG.debug('"nothing to commit" found in error response')
else:
_LOG.exception('"git commit" failed')
self.reset_hard()
raise
def merge(self, branch, destination="master"):
"""
Merge the given WIP branch to master (or destination, if specified).
If the merge fails, the merge will be aborted
and then a MergeException will be thrown. The
message of the MergeException will be the
"git status" output, so details about merge
conflicts can be determined.
"""
current_branch = self.current_branch()
if current_branch != destination:
_LOG.debug('checking out ' + destination)
git(self.gitdir, self.gitwd, "checkout", destination)
try:
git(self.gitdir, self.gitwd, "merge", branch)
except sh.ErrorReturnCode:
_LOG.exception('merge failed')
# attempt to reset things so other operations can continue
git(self.gitdir, self.gitwd, "merge", "--abort")
# raise an MergeException so that caller will know that the merge failed
raise MergeException()
new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
return new_sha.strip()
def return_document(self, doc_id, branch='master', commit_sha=None, return_WIP_map=False):
"""Return the
blob[0] contents of the given doc_id,
blob[1] the SHA1 of the HEAD of branch (or `commit_sha`)
blob[2] dictionary of WIPs for this doc.
If the doc_id does not exist, it returns the empty string.
If `commit_sha` is provided, that will be checked out and returned.
otherwise the branch will be checked out.
"""
# _LOG.debug('return_document({s}, {b}, {c}...)'.format(s=doc_id, b=branch, c=commit_sha))
if commit_sha is None:
self.checkout(branch)
head_sha = get_HEAD_SHA1(self.git_dir)
else:
self.checkout(commit_sha)
head_sha = commit_sha
doc_filepath = self.path_for_doc(doc_id)
try:
with codecs.open(doc_filepath, mode='r', encoding='utf-8') as f:
content = f.read()
except:
content = None
if return_WIP_map:
d = self.find_WIP_branches(doc_id)
return content, head_sha, d
return content, head_sha
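# Hedged usage sketch (object and doc ID names are placeholders):
#   content, head_sha = git_action.return_document('xy_10')
#   content, head_sha, wip_map = git_action.return_document('xy_10', return_WIP_map=True)
# `wip_map` maps branch names to their head SHAs, as produced by find_WIP_branches().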
def _get_changed_docs(self,
ancestral_commit_sha,
doc_id_from_repo_path,
doc_ids_to_check=None):
"""Returns the set of documents that have changed on the master since
commit `ancestral_commit_sha` or `False` (on an error)
'doc_id_from_repo_path' is a required function
if `doc_ids_to_check` is passed in, it should be an iterable list of
IDs. Only IDs in this list will be returned.
"""
try:
x = git(self.gitdir,
self.gitwd,
"diff-tree",
"--name-only",
"-r",
ancestral_commit_sha,
"master")
except:
_LOG.exception('diff-tree failed')
return False
touched = set()
for f in x.split('\n'):
found_id = doc_id_from_repo_path(f)
if found_id:
touched.add(found_id)
if doc_ids_to_check:
tc = set(doc_ids_to_check)
return tc.intersection(touched)
return touched
def _find_WIP_branches(self, doc_id, branch_pattern):
head_shas = git(self.gitdir, self.gitwd, "show-ref", "--heads")
ret = {}
# _LOG.debug('find_WIP_branches head_shas = "{}"'.format(head_shas.split('\n')))
for lin in head_shas.split('\n'):
try:
local_branch_split = lin.split(' refs/heads/')
if len(local_branch_split) == 2:
sha, branch = local_branch_split
if branch_pattern.match(branch) or branch == 'master':
ret[branch] = sha
except:
raise
return ret
def _create_or_checkout_branch(self,
gh_user,
doc_id,
parent_sha,
branch_name_template='{ghu}_doc_{rid}',
force_branch_name=False):
if force_branch_name:
# @TEMP deprecated
branch = branch_name_template.format(ghu=gh_user, rid=doc_id)
if not self.branch_exists(branch):
try:
git(self.gitdir, self.gitwd, "branch", branch, parent_sha)
_LOG.debug('Created branch "{b}" with parent "{a}"'.format(b=branch, a=parent_sha))
except:
raise ValueError('parent sha not in git repo')
self.checkout(branch)
return branch
frag = branch_name_template.format(ghu=gh_user, rid=doc_id) + "_"
branch = self._find_head_sha(frag, parent_sha)
_LOG.debug('Found
<filename>poker/hand.py
import re
import random
import itertools
import functools
from decimal import Decimal
from pathlib import Path
from cached_property import cached_property
from ._common import PokerEnum, _ReprMixin
from .card import Rank, Card, BROADWAY_RANKS
__all__ = [
"Shape",
"Hand",
"Combo",
"Range",
"PAIR_HANDS",
"OFFSUIT_HANDS",
"SUITED_HANDS",
]
# pregenerated all the possible suit combinations, so we don't have to count them all the time
_PAIR_SUIT_COMBINATIONS = ("cd", "ch", "cs", "dh", "ds", "hs")
_OFFSUIT_SUIT_COMBINATIONS = (
"cd",
"ch",
"cs",
"dc",
"dh",
"ds",
"hc",
"hd",
"hs",
"sc",
"sd",
"sh",
)
_SUITED_SUIT_COMBINATIONS = ("cc", "dd", "hh", "ss")
class Shape(PokerEnum):
OFFSUIT = "o", "offsuit", "off"
SUITED = "s", "suited"
PAIR = ("",)
class _HandMeta(type):
"""Makes Hand class iterable. __iter__ goes through all hands in ascending order."""
def __new__(metacls, clsname, bases, classdict):
"""Cache all possible Hand instances on the class itself."""
cls = super(_HandMeta, metacls).__new__(metacls, clsname, bases, classdict)
cls._all_hands = tuple(cls._get_non_pairs()) + tuple(cls._get_pairs())
return cls
def _get_non_pairs(cls):
for rank1 in Rank:
for rank2 in (r for r in Rank if r < rank1):
yield cls(f"{rank1}{rank2}o")
yield cls(f"{rank1}{rank2}s")
def _get_pairs(cls):
for rank in Rank:
yield cls(rank.val * 2)
def __iter__(cls):
return iter(cls._all_hands)
def make_random(cls):
obj = object.__new__(cls)
first = Rank.make_random()
second = Rank.make_random()
obj._set_ranks_in_order(first, second)
if first == second:
obj._shape = ""
else:
obj._shape = random.choice(["s", "o"])
return obj
@functools.total_ordering
class Hand(_ReprMixin, metaclass=_HandMeta):
"""General hand without a precise suit. Only knows about two ranks and shape."""
__slots__ = ("first", "second", "_shape")
def __new__(cls, hand):
if isinstance(hand, cls):
return hand
if len(hand) not in (2, 3):
raise ValueError("Length should be 2 (pair) or 3 (hand)")
first, second = hand[:2]
self = object.__new__(cls)
if len(hand) == 2:
if first != second:
raise ValueError(
"%r, Not a pair! Maybe you need to specify a suit?" % hand
)
self._shape = ""
elif len(hand) == 3:
shape = hand[2].lower()
if first == second:
raise ValueError(f"{hand!r}; pairs can't have a suit: {shape!r}")
if shape not in ("s", "o"):
raise ValueError(f"{hand!r}; Invalid shape: {shape!r}")
self._shape = shape
self._set_ranks_in_order(first, second)
return self
def __str__(self):
return f"{self.first}{self.second}{self.shape}"
def __hash__(self):
return hash(self.first) + hash(self.second) + hash(self.shape)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
# AKs != AKo, because AKs is better
return (
self.first == other.first
and self.second == other.second
and self.shape.val == other.shape.val
)
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
# pairs are better than non-pairs
if not self.is_pair and other.is_pair:
return True
elif self.is_pair and not other.is_pair:
return False
elif (
not self.is_pair
and not other.is_pair
and self.first == other.first
and self.second == other.second
and self._shape != other._shape
):
# when Rank match, only suit is the deciding factor
# so, offsuit hand is 'less' than suited
return self._shape == "o"
elif self.first == other.first:
return self.second < other.second
else:
return self.first < other.first
def _set_ranks_in_order(self, first, second):
# set as Rank objects.
self.first, self.second = Rank(first), Rank(second)
if self.first < self.second:
self.first, self.second = self.second, self.first
def to_combos(self):
first, second = self.first.val, self.second.val
if self.is_pair:
return tuple(
Combo(first + s1 + first + s2) for s1, s2 in _PAIR_SUIT_COMBINATIONS
)
elif self.is_offsuit:
return tuple(
Combo(first + s1 + second + s2) for s1, s2 in _OFFSUIT_SUIT_COMBINATIONS
)
else:
return tuple(
Combo(first + s1 + second + s2) for s1, s2 in _SUITED_SUIT_COMBINATIONS
)
@property
def is_suited_connector(self):
return self.is_suited and self.is_connector
@property
def is_suited(self):
return self._shape == "s"
@property
def is_offsuit(self):
return self._shape == "o"
@property
def is_connector(self):
return self.rank_difference == 1
@property
def is_one_gapper(self):
return self.rank_difference == 2
@property
def is_two_gapper(self):
return self.rank_difference == 3
@property
def rank_difference(self):
"""The difference between the first and second rank of the Hand."""
# self.first >= self.second
return Rank.difference(self.first, self.second)
@property
def is_broadway(self):
return self.first in BROADWAY_RANKS and self.second in BROADWAY_RANKS
@property
def is_pair(self):
return self.first == self.second
@property
def shape(self):
return Shape(self._shape)
@shape.setter
def shape(self, value):
self._shape = Shape(value).val
PAIR_HANDS = tuple(hand for hand in Hand if hand.is_pair)
"""Tuple of all pair hands in ascending order."""
OFFSUIT_HANDS = tuple(hand for hand in Hand if hand.is_offsuit)
"""Tuple of offsuit hands in ascending order."""
SUITED_HANDS = tuple(hand for hand in Hand if hand.is_suited)
"""Tuple of suited hands in ascending order."""
@functools.total_ordering
class Combo(_ReprMixin):
"""Hand combination."""
__slots__ = ("first", "second")
def __new__(cls, combo):
if isinstance(combo, Combo):
return combo
if len(combo) != 4:
raise ValueError("%r, should have a length of 4" % combo)
elif combo[0] == combo[2] and combo[1] == combo[3]:
raise ValueError(f"{combo!r}, Pair can't have the same suit: {combo[1]!r}")
self = super().__new__(cls)
self._set_cards_in_order(combo[:2], combo[2:])
return self
@classmethod
def from_cards(cls, first, second):
self = super().__new__(cls)
first = first.rank.val + first.suit.val
second = second.rank.val + second.suit.val
self._set_cards_in_order(first, second)
return self
def __str__(self):
return f"{self.first}{self.second}"
def __hash__(self):
return hash(self.first) + hash(self.second)
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.first == other.first and self.second == other.second
return NotImplemented
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
# lookup optimization
self_is_pair, other_is_pair = self.is_pair, other.is_pair
self_first, other_first = self.first, other.first
if self_is_pair and other_is_pair:
if self_first == other_first:
return self.second < other.second
return self_first < other_first
elif self_is_pair or other_is_pair:
# Pairs are better than non-pairs
return self_is_pair < other_is_pair
else:
if self_first.rank == other_first.rank:
if self.second.rank == other.second.rank:
# same ranks, suited go first in order by Suit rank
if self.is_suited or other.is_suited:
return self.is_suited < other.is_suited
# neither is suited (both offsuit), so fall back to the first card's suit
return self_first.suit < other_first.suit
return self.second < other.second
return self_first < other_first
def _set_cards_in_order(self, first, second):
self.first, self.second = Card(first), Card(second)
if self.first < self.second:
self.first, self.second = self.second, self.first
def to_hand(self):
"""Convert combo to :class:`Hand` object, losing suit information."""
return Hand(f"{self.first.rank}{self.second.rank}{self.shape}")
@property
def is_suited_connector(self):
return self.is_suited and self.is_connector
@property
def is_suited(self):
return self.first.suit == self.second.suit
@property
def is_offsuit(self):
return not self.is_suited and not self.is_pair
@property
def is_connector(self):
return self.rank_difference == 1
@property
def is_one_gapper(self):
return self.rank_difference == 2
@property
def is_two_gapper(self):
return self.rank_difference == 3
@property
def rank_difference(self):
"""The difference between the first and second rank of the Combo."""
# self.first >= self.second
return Rank.difference(self.first.rank, self.second.rank)
@property
def is_pair(self):
return self.first.rank == self.second.rank
@property
def is_broadway(self):
return self.first.is_broadway and self.second.is_broadway
@property
def shape(self):
if self.is_pair:
return Shape.PAIR
elif self.is_suited:
return Shape.SUITED
else:
return Shape.OFFSUIT
@shape.setter
def shape(self, value):
self._shape = Shape(value).val
class _RegexRangeLexer:
_separator_re = re.compile(r"[,;\s]+")
_rank = r"([2-9TJQKA])"
_suit = r"[cdhs♣♦♥♠]"
# the second card is not the same as the first
# (negative lookahead for the first matching group)
# this will not match pairs, but will match e.g. 86 or AK
_nonpair1 = rf"{_rank}(?!\1){_rank}"
_nonpair2 = rf"{_rank}(?!\2){_rank}"
rules = (
# NAME, REGEX, value extractor METHOD NAME
("ALL", r"XX", "_get_value"),
("PAIR", rf"{_rank}\1$", "_get_first"),
("PAIR_PLUS", rf"{_rank}\1\+$", "_get_first"),
("PAIR_MINUS", rf"{_rank}\1-$", "_get_first"),
("PAIR_DASH", rf"{_rank}\1-{_rank}\2$", "_get_for_pair_dash"),
("BOTH", rf"{_nonpair1}$", "_get_first_two"),
("BOTH_PLUS", rf"{_nonpair1}\+$", "_get_first_two"),
("BOTH_MINUS", rf"{_nonpair1}-$", "_get_first_two"),
("BOTH_DASH", rf"{_nonpair1}-{_nonpair2}$", "_get_for_both_dash"),
("SUITED", rf"{_nonpair1}s$", "_get_first_two"),
("SUITED_PLUS", rf"{_nonpair1}s\+$", "_get_first_two"),
("SUITED_MINUS", rf"{_nonpair1}s-$", "_get_first_two"),
("SUITED_DASH", rf"{_nonpair1}s-{_nonpair2}s$", "_get_for_shaped_dash"),
("OFFSUIT", rf"{_nonpair1}o$", "_get_first_two"),
("OFFSUIT_PLUS", rf"{_nonpair1}o\+$", "_get_first_two"),
("OFFSUIT_MINUS", rf"{_nonpair1}o-$", "_get_first_two"),
("OFFSUIT_DASH", rf"{_nonpair1}o-{_nonpair2}o$", "_get_for_shaped_dash"),
("X_SUITED", rf"{_rank}Xs$|X{_rank}s$", "_get_rank"),
("X_SUITED_PLUS", rf"{_rank}Xs\+$|X{_rank}s\+$", "_get_rank"),
("X_SUITED_MINUS", rf"{_rank}Xs-$|X{_rank}s-$", "_get_rank"),
("X_OFFSUIT", rf"{_rank}Xo$|X{_rank}o$", "_get_rank"),
("X_OFFSUIT_PLUS", rf"{_rank}Xo\+$|X{_rank}o\+$", "_get_rank"),
("X_OFFSUIT_MINUS", rf"{_rank}Xo-$|X{_rank}o-$", "_get_rank"),
("X_PLUS", rf"{_rank}X\+$|X{_rank}\+$", "_get_rank"),
("X_MINUS", rf"{_rank}X-$|X{_rank}-$", "_get_rank"),
("X_BOTH", rf"{_rank}X$|X{_rank}$", "_get_rank"),
# might be anything, even pair
# FIXME: 5s5s accepted
("COMBO", rf"{_rank}{_suit}{_rank}{_suit}$", "_get_value"),
)
# compile regexes when initializing class, so every instance will have them precompiled
rules = [
(name, re.compile(regex, re.IGNORECASE), method)
for (name, regex, method) in rules
]
def __init__(self, range=""):
# filter out empty matches
self.tokens = [token for token in self._separator_re.split(range) if token]
def __iter__(self):
"""Goes through all the tokens and compare them with the regex rules. If it finds a match,
makes an appropriate value for the token and yields them.
"""
for token in self.tokens:
for name, regex, method_name in self.rules:
if regex.match(token):
val_method = getattr(self, method_name)
yield name, val_method(token)
break
else:
raise ValueError("Invalid token: %s" % token)
@staticmethod
def _get_value(token):
return token
@staticmethod
def _get_first(token):
return token[0]
@staticmethod
def _get_rank(token):
return token[0] if token[1].upper() == "X" else token[1]
@classmethod
def _get_in_order(cls, first_part, second_part, token):
smaller, bigger = cls._get_rank_in_order(token, first_part, second_part)
return smaller.val, bigger.val
@classmethod
def _get_first_two(cls, token):
return cls._get_in_order(0, 1, token)
@classmethod
def _get_for_pair_dash(cls, token):
return cls._get_in_order(0,
<gh_stars>0
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import asyncio
import ipaddress
from itertools import filterfalse
import json
import logging
from logging.handlers import RotatingFileHandler
import os
import re
import signal
import socket
import subprocess
import sys
from stack.api.get import GetAttr
from stack.commands import Command
from stack.exception import CommandError
import stack.mq
class Discovery:
"""
Start or stop a daemon that listens for PXE boots and inserts the new
nodes into the database.
"""
_PIDFILE = "/var/run/stack-discovery.pid"
_LOGFILE = "/var/log/stack-discovery.log"
_get_next_ip_address_cache = {}
_get_ipv4_network_for_interface_cache = {}
@property
def hostname(self):
return f"{self._base_name}-{self._rack}-{self._rank}"
def _get_ipv4_network_for_interface(self, interface):
"""
Return an IPv4Network object for a given interface, caching the results in the process.
"""
ipv4_network = self._get_ipv4_network_for_interface_cache.get(interface)
# If we don't have a network in the cache, create it
if ipv4_network is None:
results = subprocess.run(
["ip", "-o", "-4", "address"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8"
)
for line in results.stdout.splitlines():
match = re.match(r'\d+:\s+(\S+)\s+inet\s+(\S+)', line)
if match:
if match.group(1) == interface:
ipv4_network = ipaddress.IPv4Interface(match.group(2)).network
self._get_ipv4_network_for_interface_cache[interface] = ipv4_network
self._logger.debug("found network: %s", ipv4_network)
break
else:
self._logger.debug("ip regex didn't match line: %s", line)
return ipv4_network
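# Illustrative note (interface name and addresses are made up): a line of
# `ip -o -4 address` output such as
#   2: eth0    inet 192.168.1.10/24 brd 192.168.1.255 scope global eth0
# is matched by the regex above, and IPv4Interface('192.168.1.10/24').network
# gives IPv4Network('192.168.1.0/24'), which is what gets cached per interface.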
def _get_hosts_for_interface(self, interface):
"""
Get an iterator of IPv4Address objects for this interface, if its network exists in the database
and is PXE bootable. If it isn't a valid interface, return None.
"""
# Get an IPv4Network for this interface passed in
ipv4_network = self._get_ipv4_network_for_interface(interface)
if ipv4_network is not None:
# Figure out the gateway for the interface and check that pxe is true
self._command.db.clearCache()
for row in self._command.call("list.network"):
if (
row['address'] == str(ipv4_network.network_address) and
row['mask'] == str(ipv4_network.netmask)
):
if row['pxe'] == True:
# Make sure to filter out the gateway IP address
gateway = ipaddress.IPv4Address(row['gateway'])
return filterfalse(lambda x: x == gateway, ipv4_network.hosts())
else:
self._logger.warning("pxe not enabled on interface: %s", interface)
break
else:
self._logger.warning("unknown network for interface: %s", interface)
# We couldn't find the network or it wasn't pxe enabled
return None
def _get_next_ip_address(self, interface):
"""
Get the next available IP address for the network on the provided interface.
Return None if we are out of IP addresses or if the interface is not valid.
"""
# See if we need to get the hosts() iterator for this interface
if interface not in self._get_next_ip_address_cache:
# Get the hosts iterator for this interface, return None if it isn't valid
hosts = self._get_hosts_for_interface(interface)
if hosts is None:
return None
self._get_next_ip_address_cache[interface] = hosts
# Find the next available IP address
for ip_address in self._get_next_ip_address_cache[interface]:
self._logger.debug("trying IP address: %s", ip_address)
# Make sure this IP isn't already taken
self._command.db.clearCache()
for row in self._command.call("list.host.interface"):
if (
row['ip'] == str(ip_address) and
not row['interface'].startswith("vlan")
):
self._logger.debug("IP address already taken: %s", ip_address)
break
else:
# Looks like it is free
self._logger.debug("IP address is free: %s", ip_address)
return ip_address
# No IP addresses left
return None
def _add_node(self, interface, mac_address, ip_address):
# Figure out the network for this interface
network = None
ipv4_network = self._get_ipv4_network_for_interface(interface)
if ipv4_network is not None:
self._command.db.clearCache()
for row in self._command.call("list.network"):
if (
row['address'] == str(ipv4_network.network_address) and
row['mask'] == str(ipv4_network.netmask)
):
network = row['network']
break
# The network should always be able to be found, unless something deleted it since the
# discovery daemon started running
if network is not None:
# Add our new node
result = subprocess.run([
"/opt/stack/bin/stack",
"add",
"host",
self.hostname,
f"appliance={self._appliance_name}",
f"rack={self._rack}",
f"rank={self._rank}",
f"box={self._box}"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode != 0:
self._logger.error("failed to add host %s:\n%s", self.hostname, result.stderr)
return
# Add the node's interface
result = subprocess.run([
"/opt/stack/bin/stack",
"add",
"host",
"interface",
self.hostname,
"interface=NULL",
"default=true",
f"mac={mac_address}",
f"name={self.hostname}",
f"ip={ip_address}",
f"network={network}"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode != 0:
self._logger.error("failed to add interface for host %s:\n%s", self.hostname, result.stderr)
return
# Set the new node's install action
result = subprocess.run([
"/opt/stack/bin/stack",
"set",
"host",
"installaction",
self.hostname,
f"action={self._install_action}"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode != 0:
self._logger.error("failed to set install action for host %s:\n%s", self.hostname, result.stderr)
return
if self._install:
# Set the new node to install on boot
result = subprocess.run([
"/opt/stack/bin/stack",
"set",
"host",
"boot",
self.hostname,
"action=install"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode != 0:
self._logger.error("failed to set boot action for host %s:\n%s", self.hostname, result.stderr)
return
else:
# Set the new node to OS on boot
result = subprocess.run([
"/opt/stack/bin/stack",
"set",
"host",
"boot",
self.hostname,
"action=os"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode != 0:
self._logger.error("failed to set boot action for host %s:\n%s", self.hostname, result.stderr)
return
# Sync the global config
result = subprocess.run([
"/opt/stack/bin/stack",
"sync",
"config"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode != 0:
self._logger.error("unable to sync global config:\n%s", result.stderr)
return
# Sync the host config
result = subprocess.run([
"/opt/stack/bin/stack",
"sync",
"host",
"config",
self.hostname
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
if result.returncode != 0:
self._logger.error("unable to sync host config:\n%s", result.stderr)
return
self._logger.info("successfully added host %s", self.hostname)
# Post the host added message
message = json.dumps({
'channel': "discovery",
'payload': {
'type': "add",
'interface': interface,
'mac_address': mac_address,
'ip_address': str(ip_address),
'hostname': self.hostname
}
})
self._socket.sendto(message.encode(), ("localhost", stack.mq.ports.publish))
else:
self._logger.error("no network exists for interface %s", interface)
def _process_dhcp_line(self, line):
# See if we are a DHCPDISCOVER message
match = re.search(r"DHCPDISCOVER from ([0-9a-f:]{17}) via (\S+)(:|$)", line)
if match:
mac_address = match.group(1)
interface = match.group(2)
self._logger.info("detected a dhcp request: %s %s", mac_address, interface)
# Is this a new MAC address?
self._command.db.clearCache()
for row in self._command.call("list.host.interface"):
if row['mac'] == mac_address:
self._logger.debug("node is already known: %s %s", mac_address, interface)
break
else:
self._logger.info("found a new node: %s %s", mac_address, interface)
# Make sure we have an IP for it
ip_address = self._get_next_ip_address(interface)
if ip_address is None:
self._logger.error("no IP addresses available for interface %s", interface)
else:
# Add the new node
self._add_node(interface, mac_address, ip_address)
# Increment the rank
self._rank += 1
else:
if "DHCPDISCOVER" in line:
self._logger.warning("DHCPDISCOVER found in line but didn't match regex:\n%s", line)
def _process_kickstart_line(self, line):
if re.search("install/sbin(/public)?/profile.cgi", line):
parts = line.split()
try:
ip_address = ipaddress.ip_address(parts[0])
status_code = int(parts[8])
# Post the host kickstart message
message = json.dumps({
'channel': "discovery",
'payload': {
'type': "kickstart",
'ip_address': str(ip_address),
'status_code': status_code
}
})
self._socket.sendto(message.encode(), ("localhost", stack.mq.ports.publish))
except ValueError as e:
self._logger.error("Invalid Apache log format: %s", line)
async def _monitor_log(self, log_path, process_line):
# Open our log file
with open(log_path, 'r') as log:
# Move to the end
log.seek(0, 2)
# Start looking for new lines in the log file
while not self._done:
line = log.readline()
if line:
process_line(line)
else:
await asyncio.sleep(1)
def _cleanup(self):
try:
os.remove(self._PIDFILE)
except:
pass
def _signal_handler(self):
self._done = True
def _get_pid(self):
pid = None
if os.path.exists(self._PIDFILE):
with open(self._PIDFILE, 'r') as f:
pid = int(f.read())
return pid
def __init__(self, logging_level=logging.INFO):
# Set up our logger
formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S")
try:
handler = RotatingFileHandler(
self._LOGFILE,
maxBytes=10*1024*1024,
backupCount=3
)
handler.setFormatter(formatter)
except PermissionError:
# We don't have write access to the logfile, so just blackhole logs
handler = logging.NullHandler()
self._logger = logging.getLogger("discovery")
self._logger.setLevel(logging_level)
self._logger.addHandler(handler)
def is_running(self):
"Check if the daemon is running."
# Is our pidfile there?
pid = self._get_pid()
if pid is not None:
# Is the process still running?
if os.path.isdir(f"/proc/{pid}"):
return True
else:
# The process no longer exists, clean up the old files
self._cleanup()
return False
def start(self, command, appliance_name=None, base_name=None,
rack=None, rank=None, box=None, install_action=None, install=None):
"""
Start the node discovery daemon.
"""
# Only start if there isn't already a daemon running
if not self.is_running():
# Make sure our appliance name is valid
if appliance_name:
try:
command.call("list.appliance", [appliance_name])
self._appliance_name = appliance_name
except CommandError:
raise ValueError(f"Unknown appliance with name {appliance_name}")
else:
self._appliance_name = "backend"
# Set up the base name
if base_name:
self._base_name = base_name
else:
self._base_name = self._appliance_name
# Set up the rack
if rack is None:
self._rack = int(GetAttr("discovery.base.rack"))
else:
self._rack = int(rack)
# Set up the rank
if rank is None:
# Start with with default
self._rank = int(GetAttr("discovery.base.rank"))
# Try to pull the next rank based on the DB
for host in command.call("list.host"):
if
= settings.get_inv_multiple_req_items()
recurring = settings.get_inv_req_recurring()
req_status_writable = settings.get_inv_req_status_writable()
requester_label = settings.get_inv_requester_label()
transit_status = settings.get_inv_req_show_quantity_transit()
use_commit = settings.get_inv_use_commit()
use_req_number = settings.get_inv_use_req_number()
use_workflow = settings.get_inv_req_workflow()
# Defaults for Requesting Site and Requester
requester_is_author = settings.get_inv_requester_is_author()
if requester_is_author and auth.s3_logged_in() and auth.user:
site_default = auth.user.site_id
requester_default = auth.s3_logged_in_person()
else:
site_default = None
requester_default = None
# Dropdown or Autocomplete for Requesting Site?
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = S3PopupLink(c = "org",
f = "facility",
vars = {"child": "site_id"},
title = T("Create Facility"),
tooltip = AUTOCOMPLETE_HELP,
)
else:
site_widget = None
site_comment = S3PopupLink(c = "org",
f = "facility",
vars = {"child": "site_id"},
title = T("Create Facility"),
)
# Workflow options
workflow_opts = {1: T("Draft"),
2: T("Submitted for Approval"),
3: T("Approved"),
4: T("Completed"),
5: T("Cancelled"),
}
if use_workflow:
workflow_default = 1 # Draft
workflow_status_requires = IS_IN_SET(workflow_opts)
else:
# Don't make assumptions
workflow_default = None
workflow_status_requires = IS_EMPTY_OR(IS_IN_SET(workflow_opts))
# ---------------------------------------------------------------------
# Request Reference
#
req_ref = S3ReusableField("req_ref", "string",
label = T("%(REQ)s Number") %
{"REQ": settings.get_inv_req_shortname()},
writable = False,
)
# ---------------------------------------------------------------------
# Requests
#
tablename = "inv_req"
self.define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
req_ref(# Adds no value when not using show_link
#represent = inv_ReqRefRepresent(),
readable = use_req_number,
writable = use_req_number,
widget = lambda f, v: \
StringWidget.widget(f, v, _placeholder = T("Leave blank to have this autogenerated"))
),
s3_datetime(default = "now",
label = T("Date Requested"),
past = 8760, # Hours, so 1 year
future = 0,
readable = date_writable,
writable = date_writable,
#represent = "date",
#widget = "date",
),
req_priority()(),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
comment = site_comment,
default = site_default,
empty = False,
filterby = "obsolete",
filter_opts = (False,),
instance_types = auth.org_site_types,
label = T("Requested For Facility"),
readable = True,
represent = self.org_site_represent,
updateable = True,
widget = site_widget,
writable = True,
),
# Donations: What will the Items be used for?; People: Task Details
s3_comments("purpose",
comment = "",
label = T("Purpose"),
# Only-needed for summary mode (unused)
#represent = self.req_purpose_represent,
represent = lambda s: s if s else NONE,
),
Field("is_template", "boolean",
default = False,
label = T("Recurring Request?"),
represent = s3_yes_no_represent,
readable = recurring,
writable = recurring,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Recurring Request?"),
T("If this is a request template to be added repeatedly then the schedule can be set on the next page."))),
),
s3_datetime("date_required",
label = T("Date Needed By"),
past = 1, # Allow time for people to fill out form
future = 8760, # Hours, so 1 year
#represent = "date",
#widget = "date",
),
# Not needed for consumable goods, i.e. Inventory Requisitions:
#s3_datetime("date_required_until",
# label = T("Date Required Until"),
# past = 0,
# future = 8760, # Hours, so 1 year
# readable = False,
# writable = False
# ),
person_id("requester_id",
default = requester_default,
empty = settings.get_inv_requester_optional(),
label = requester_label,
represent = self.pr_PersonRepresentContact(link_contacts = True),
#writable = False,
comment = S3PopupLink(c = "pr",
f = "person",
vars = {"child": "requester_id",
"parent": "req",
},
title = crud_strings["pr_person"].label_create,
tooltip = AUTOCOMPLETE_HELP,
),
),
person_id("assigned_to_id", # This field should be in inv_commit, but that complicates the UI
label = T("Assigned To"),
readable = False,
writable = False,
),
person_id("approved_by_id",
label = T("Approved By"),
readable = False,
writable = False,
),
person_id("request_for_id",
#default = auth.s3_logged_in_person(),
label = T("Requested For"),
readable = False,
writable = False,
),
Field("transport_req", "boolean",
label = T("Transportation Required"),
represent = s3_yes_no_represent,
readable = ask_transport,
writable = ask_transport,
),
Field("security_req", "boolean",
label = T("Security Required"),
represent = s3_yes_no_represent,
readable = ask_security,
writable = ask_security,
),
s3_datetime("date_recv",
label = T("Date Received"), # Could be T("Date Delivered") - make deployment_setting or just use Template
past = 8760, # Hours, so 1 year
future = 0,
readable = False,
writable = False,
),
person_id("recv_by_id",
# @ToDo: Set this in Update forms? Dedicated 'Receive' button?
# (Definitely not in Create forms)
#default = auth.s3_logged_in_person(),
label = T("Received By"),
),
# Workflow Status
Field("workflow_status", "integer",
label = T("Status"),
default = workflow_default,
requires = workflow_status_requires,
represent = s3_options_represent(workflow_opts),
readable = use_workflow,
writable = False,
),
# Simple Status
# - currently just enabled in customise_req_fields() workflow
req_status_field(readable = False,
writable = False,
),
# Detailed Status
req_status_field("commit_status",
label = T("Commit Status"),
represent = self.inv_commit_status_represent,
readable = use_commit,
writable = req_status_writable and use_commit,
),
req_status_field("transit_status",
label = T("Transit Status"),
readable = transit_status,
writable = req_status_writable and transit_status,
),
req_status_field("fulfil_status",
label = T("Fulfil. Status"),
writable = req_status_writable,
),
#req_status_field("filing_status",
# label = T("Filing Status"),
# comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Filing Status"),
# T("Have all the signed documents for this shipment been filed?"))),
# readable = settings.get_inv_req_document_filing(),
# writable = False,
# ),
Field("closed", "boolean",
default = False,
label = T("Closed"),
readable = not use_workflow,
writable = not use_workflow,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Closed"),
T("No more items may be added to this request"),
)),
),
Field("cancel", "boolean",
default = False,
label = T("Cancel"),
),
Field.Method("details", inv_req_details),
Field.Method("drivers", inv_req_drivers),
s3_comments(comment = ""),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Raise Requisition"),
title_display = T("Requisition Details"),
title_list = T("Requisitions"),
title_map = T("Map of Requisitions"),
title_report = T("Requisitions Report"),
title_update = T("Edit Requisition"),
label_list_button = T("List Requisitions"),
label_delete_button = T("Delete Requisition"),
msg_record_created = T("Requisition Added"),
msg_record_modified = T("Requisition Updated"),
msg_record_deleted = T("Requisition Canceled"),
msg_list_empty = T("No Requisitions"),
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
filter_widgets = [
#S3TextFilter(["committer_id$first_name",
# "committer_id$middle_name",
# "committer_id$last_name",
# "site_id$name",
# "comments",
# "req_id$name",
# "organisation_id$name"
# ],
# label = T("Search"),
# comment = T("Search for a Requisition by Committer name, Requisition ID, Site or Organization."),
# ),
S3OptionsFilter("fulfil_status",
# Better to default (easier to customise/consistency)
#label = T("Fulfill Status"),
cols = 3,
),
S3LocationFilter("site_id$location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("site_id",
# Better to default (easier to customise/consistency)
#label = T("Requested For Facility"),
hidden = True,
),
S3OptionsFilter("created_by",
label = T("Logged By"),
hidden = True,
),
S3DateFilter("date",
# Better to default (easier to customise/consistency)
#label = T("Date Requested"),
hide_time = True,
input_labels = {"ge": "From", "le": "To"},
comment = T("Search for requests made between these dates."),
hidden = True,
),
S3DateFilter("date_required",
# Better to default (easier to customise/consistency)
#label = T("Date Needed By"),
hide_time = True,
input_labels = {"ge": "From", "le": "To"},
comment = T("Search for requests required between these dates."),
hidden = True,
),
]
position = 1
if transit_status:
position += 1
filter_widgets.insert(0,
S3OptionsFilter("transit_status",
# Better to default (easier to customise/consistency)
#label = T("Transit Status"),
options = req_status_opts,
cols = 3,
))
filter_widgets.insert(position + 2,
S3OptionsFilter("req_item.item_id$item_category_id",
label = T("Item Category"),
hidden = True,
))
if use_commit:
filter_widgets.insert(position,
S3OptionsFilter("commit_status",
# Better to default (easier to customise/consistency)
#label = T("Commit Status"),
options = req_status_opts,
cols = 3,
hidden = True,
))
report_fields = ["priority",
"site_id$organisation_id",
]
rappend = report_fields.append
for level in levels:
rappend("site_id$location_id$%s" % level)
rappend("site_id")
# @ToDo: id gets stripped in _select_field
fact_fields = report_fields + [(T("Requisitions"), "id")]
# Reusable Field
req_represent = inv_ReqRepresent(show_link = True)
req_id = S3ReusableField("req_id", "reference %s" % tablename,
label = T("Requisition"),
ondelete = "CASCADE",
represent = req_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_req.id",
req_represent,
orderby = "inv_req.date",
sort = True,
)
),
sortby = "date",
)
list_fields = ["date",
"date_required",
"site_id",
"requester_id",
]
if use_req_number:
list_fields.insert(1, "req_ref")
list_fields.extend(("priority",
(T("Details"), "details"),
T("Drivers"), "drivers")
)
if use_commit:
list_fields.append("commit_status")
if transit_status:
list_fields.append("transit_status")
list_fields.append("fulfil_status")
if use_commit:
list_fields.append((T("Committed By"), "commit.site_id"))
self.configure(tablename,
context = {"location": "site_id$location_id",
"organisation": "site_id$organisation_id",
"site": "site_id",
},
deduplicate = S3Duplicate(primary = ("req_ref",)),
extra_fields = ("req_ref",),
filter_widgets = filter_widgets,
onaccept = self.inv_req_onaccept,
ondelete = self.inv_req_ondelete,
list_fields = list_fields,
orderby = "inv_req.date desc",
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = fact_fields,
methods = ["count", "list", "sum"],
defaults = Storage(
rows = "site_id$location_id$%s" % levels[0], # Highest-level of hierarchy
cols = "priority",
fact = "count(id)",
totals
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from multiprocessing import Value
import os
import re
from textwrap import dedent
from threading import current_thread
from time import sleep, time
from sys import maxint
from tests.stress.queries import QueryType
from tests.stress.util import create_and_start_daemon_thread, increment
from tests.util.thrift_util import op_handle_to_query_id
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
# Metrics collected during the stress running process.
NUM_QUERIES_DEQUEUED = "num_queries_dequeued"
# The number of queries that were submitted to a query runner.
NUM_QUERIES_SUBMITTED = "num_queries_submitted"
# The number of queries that have entered the RUNNING state (i.e. got through Impala's
# admission control and started executing) or were cancelled or hit an error.
NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED = "num_queries_started_running_or_cancelled"
NUM_QUERIES_FINISHED = "num_queries_finished"
NUM_QUERIES_EXCEEDED_MEM_LIMIT = "num_queries_exceeded_mem_limit"
NUM_QUERIES_AC_REJECTED = "num_queries_ac_rejected"
NUM_QUERIES_AC_TIMEDOUT = "num_queries_ac_timedout"
NUM_QUERIES_CANCELLED = "num_queries_cancelled"
NUM_RESULT_MISMATCHES = "num_result_mismatches"
NUM_OTHER_ERRORS = "num_other_errors"
RESULT_HASHES_DIR = "result_hashes"
class QueryTimeout(Exception):
pass
class QueryRunner(object):
"""Encapsulates functionality to run a query and provide a runtime report."""
SPILLED_PATTERNS = [re.compile("ExecOption:.*Spilled"), re.compile("SpilledRuns: [^0]")]
BATCH_SIZE = 1024
def __init__(self, impalad, results_dir, use_kerberos, common_query_options,
test_admission_control, check_if_mem_was_spilled=False):
"""Creates a new instance, but does not start the process. """
self.impalad = impalad
self.use_kerberos = use_kerberos
self.results_dir = results_dir
self.check_if_mem_was_spilled = check_if_mem_was_spilled
self.common_query_options = common_query_options
self.test_admission_control = test_admission_control
# proc is filled out by caller
self.proc = None
# impalad_conn is initialised in connect()
self.impalad_conn = None
# All these values are shared values between processes. We want these to be accessible
# by the parent process that started this QueryRunner, for operational purposes.
self._metrics = {
NUM_QUERIES_DEQUEUED: Value("i", 0),
NUM_QUERIES_SUBMITTED: Value("i", 0),
NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED: Value("i", 0),
NUM_QUERIES_FINISHED: Value("i", 0),
NUM_QUERIES_EXCEEDED_MEM_LIMIT: Value("i", 0),
NUM_QUERIES_AC_REJECTED: Value("i", 0),
NUM_QUERIES_AC_TIMEDOUT: Value("i", 0),
NUM_QUERIES_CANCELLED: Value("i", 0),
NUM_RESULT_MISMATCHES: Value("i", 0),
NUM_OTHER_ERRORS: Value("i", 0)}
def connect(self):
"""Connect to the server and start the query runner thread."""
self.impalad_conn = self.impalad.impala.connect(impalad=self.impalad)
def run_query(self, query, mem_limit_mb, run_set_up=False,
timeout_secs=maxint, should_cancel=False, retain_profile=False):
"""Run a query and return an execution report. If 'run_set_up' is True, set up sql
will be executed before the main query. This should be the case during the binary
search phase of the stress test.
If 'should_cancel' is True, don't get the query profile for timed out queries because
the query was purposely cancelled by setting the query timeout too short to complete,
rather than having some problem that needs to be investigated.
"""
if not self.impalad_conn:
raise Exception("connect() must first be called")
timeout_unix_time = time() + timeout_secs
report = QueryReport(query)
try:
with self.impalad_conn.cursor() as cursor:
start_time = time()
self._set_db_and_options(cursor, query, run_set_up, mem_limit_mb, timeout_secs)
error = None
try:
cursor.execute_async(
"/* Mem: %s MB. Coordinator: %s. */\n"
% (mem_limit_mb, self.impalad.host_name) + query.sql)
report.query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
LOG.debug("Query id is %s", report.query_id)
if not self._wait_until_fetchable(cursor, report, timeout_unix_time,
should_cancel):
return report
if query.query_type == QueryType.SELECT:
try:
report.result_hash = self._fetch_and_hash_result(cursor, timeout_unix_time,
query)
if retain_profile or \
query.result_hash and report.result_hash != query.result_hash:
fetch_and_set_profile(cursor, report)
except QueryTimeout:
# TODO: IMPALA-6326: remove this cancel, which can race with the thread
# in _fetch_and_hash_result() and cause crashes and other errors.
self._cancel(cursor, report)
return report
else:
# If query is in error state, this will raise an exception
cursor._wait_to_finish()
except Exception as error:
report.query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
LOG.debug("Error running query with id %s: %s", report.query_id, error)
self._check_for_memory_errors(report, cursor, error)
if report.has_query_error():
return report
report.runtime_secs = time() - start_time
if cursor.execution_failed() or self.check_if_mem_was_spilled:
fetch_and_set_profile(cursor, report)
report.mem_was_spilled = any([
pattern.search(report.profile) is not None
for pattern in QueryRunner.SPILLED_PATTERNS])
report.not_enough_memory = "Memory limit exceeded" in report.profile
except Exception as error:
# A mem limit error would have been caught above, no need to check for that here.
report.other_error = error
return report
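# Hedged usage sketch (the impalad/query objects are placeholders for whatever
# the stress test framework supplies):
#   runner = QueryRunner(impalad, "/tmp/results", use_kerberos=False,
#                        common_query_options={}, test_admission_control=False)
#   runner.connect()
#   report = runner.run_query(query, mem_limit_mb=512, timeout_secs=300)
# The returned QueryReport carries runtime_secs, result_hash and any error info.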
def _set_db_and_options(self, cursor, query, run_set_up, mem_limit_mb, timeout_secs):
"""Set up a new cursor for running a query by switching to the correct database and
setting query options."""
if query.db_name:
LOG.debug("Using %s database", query.db_name)
cursor.execute("USE %s" % query.db_name)
if run_set_up and query.set_up_sql:
LOG.debug("Running set up query:\n%s", query.set_up_sql)
cursor.execute(query.set_up_sql)
for query_option, value in self.common_query_options.iteritems():
cursor.execute(
"SET {query_option}={value}".format(query_option=query_option, value=value))
for query_option, value in query.options.iteritems():
cursor.execute(
"SET {query_option}={value}".format(query_option=query_option, value=value))
cursor.execute("SET ABORT_ON_ERROR=1")
if self.test_admission_control:
LOG.debug(
"Running query without mem limit at %s with timeout secs %s:\n%s",
self.impalad.host_name, timeout_secs, query.sql)
else:
LOG.debug("Setting mem limit to %s MB", mem_limit_mb)
cursor.execute("SET MEM_LIMIT=%sM" % mem_limit_mb)
LOG.debug(
"Running query with %s MB mem limit at %s with timeout secs %s:\n%s",
mem_limit_mb, self.impalad.host_name, timeout_secs, query.sql)
def _wait_until_fetchable(self, cursor, report, timeout_unix_time, should_cancel):
"""Wait up until timeout_unix_time until the query results can be fetched (if it's
a SELECT query) or until it has finished executing (if it's a different query type
like DML). If the timeout expires we either cancel the query or report the timeout.
Return True in the first case or False in the second (timeout) case."""
# Loop until the query gets to the right state or a timeout expires.
sleep_secs = 0.1
secs_since_log = 0
# True if we incremented num_queries_started_running_or_cancelled for this query.
started_running_or_cancelled = False
while True:
query_state = cursor.status()
# Check if the query got past the PENDING/INITIALIZED states, either because
# it's executing or hit an error.
if (not started_running_or_cancelled and query_state not in ('PENDING_STATE',
'INITIALIZED_STATE')):
started_running_or_cancelled = True
increment(self._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
# Return if we're ready to fetch results (in the FINISHED state) or we are in
# another terminal state like EXCEPTION.
if query_state not in ('PENDING_STATE', 'INITIALIZED_STATE', 'RUNNING_STATE'):
return True
if time() > timeout_unix_time:
if not should_cancel:
fetch_and_set_profile(cursor, report)
self._cancel(cursor, report)
if not started_running_or_cancelled:
increment(self._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
return False
if secs_since_log > 5:
secs_since_log = 0
LOG.debug("Waiting for query to execute")
sleep(sleep_secs)
secs_since_log += sleep_secs
def update_from_query_report(self, report):
LOG.debug("Updating runtime stats (Query Runner PID: {0})".format(self.proc.pid))
increment(self._metrics[NUM_QUERIES_FINISHED])
if report.not_enough_memory:
increment(self._metrics[NUM_QUERIES_EXCEEDED_MEM_LIMIT])
if report.ac_rejected:
increment(self._metrics[NUM_QUERIES_AC_REJECTED])
if report.ac_timedout:
increment(self._metrics[NUM_QUERIES_AC_TIMEDOUT])
if report.was_cancelled:
increment(self._metrics[NUM_QUERIES_CANCELLED])
def _cancel(self, cursor, report):
report.timed_out = True
if not report.query_id:
return
try:
LOG.debug("Attempting cancellation of query with id %s", report.query_id)
cursor.cancel_operation()
LOG.debug("Sent cancellation request for query with id %s", report.query_id)
except Exception as e:
LOG.debug("Error cancelling query with id %s: %s", report.query_id, e)
try:
LOG.debug("Attempting to cancel query through the web server.")
self.impalad.cancel_query(report.query_id)
except Exception as e:
LOG.debug("Error cancelling query %s through the web server: %s",
report.query_id, e)
def _check_for_memory_errors(self, report, cursor, caught_exception):
"""To be called after a query failure to check for signs of failed due to a
mem limit or admission control rejection/timeout. The report will be updated
accordingly.
"""
fetch_and_set_profile(cursor, report)
caught_msg = str(caught_exception).lower().strip()
# Distinguish error conditions based on string fragments. The AC rejection and
# out-of-memory conditions actually overlap (since some memory checks happen in
# admission control) so check the out-of-memory conditions first.
if "memory limit exceeded" in caught_msg or \
"repartitioning did not reduce the size of a spilled partition" in caught_msg or \
"failed to get minimum memory reservation" in caught_msg or \
"minimum memory reservation is greater than" in caught_msg or \
"minimum memory reservation needed is greater than" in caught_msg:
report.not_enough_memory = True
return
if "rejected query from pool" in caught_msg:
report.ac_rejected = True
return
if "admission for query exceeded timeout" in caught_msg:
report.ac_timedout = True
return
LOG.debug("Non-mem limit error for query with id %s: %s", report.query_id,
caught_exception, exc_info=True)
report.other_error = caught_exception
def _fetch_and_hash_result(self, cursor, timeout_unix_time, query):
"""Fetches results from 'cursor' and returns a hash that is independent of row order.
Raises QueryTimeout() if we couldn't fetch all rows from the query before time()
reaches 'timeout_unix_time'.
'query' is only used for debug logging purposes (if the result is not as expected a
log file will be left in RESULTS_DIR for investigation).
    """
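    # Illustrative sketch only (not necessarily how this runner computes it): an
    # order-independent hash can be built by hashing each fetched row separately
    # and accumulating the digests, e.g.
    #
    #   result_hash = 0
    #   for row in cursor:
    #     result_hash = (result_hash + hash(tuple(row))) % (2 ** 64)
    #
    # while comparing time() against 'timeout_unix_time' between fetches and
    # raising QueryTimeout() once the deadline passes.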
"""This script benchmarks Gilda on the BioCreative VII BioID corpus.
It dumps multiple result tables in the results folder."""
import json
import os
from collections import defaultdict
from copy import deepcopy
from datetime import datetime
from functools import lru_cache
from textwrap import dedent
from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple
import click
import pandas as pd
import pystow
import tabulate
from lxml import etree
from tqdm import tqdm
import famplex
from gilda.grounder import Grounder, logger
from gilda.resources import mesh_to_taxonomy, popular_organisms
from indra.databases.chebi_client import get_chebi_id_from_pubchem
from indra.databases.hgnc_client import get_hgnc_from_entrez
from indra.databases.uniprot_client import get_hgnc_id
from indra.literature import pmc_client, pubmed_client
from indra.ontology.bio import bio_ontology
logger.setLevel('WARNING')
HERE = os.path.dirname(os.path.abspath(__file__))
TAXONOMY_CACHE_PATH = os.path.join(HERE, 'taxonomy_cache.json')
MODULE = pystow.module('gilda', 'biocreative')
URL = 'https://biocreative.bioinformatics.udel.edu/media/store/files/2017/BioIDtraining_2.tar.gz'
tqdm.pandas()
#: A set of the prefix->prefix mappings missing from the bio-ontology
BO_MISSING_XREFS = set()
class BioIDBenchmarker:
"""Used for evaluating gilda using data from BioCreative VI BioID track
Parameters
----------
grounder :
Grounder object to use in evaluation. If None, instantiates a grounder
with default arguments. Default: None
equivalences :
Dictionary of mappings between namespaces. Maps strings of the form
f'{namespace}:{id}' to strings for equivalent groundings. This is
        used to map groundings from namespaces used in the BioID track
(e.g. Uberon, Cell Ontology, Cellosaurus, NCBI Taxonomy) that are not
available by default in Gilda. Default: None
"""
def __init__(
self,
*,
grounder: Optional[Grounder] = None,
equivalences: Optional[Dict[str, Any]] = None,
):
print("using tabulate", tabulate.__version__)
print("Instantiating benchmarker...")
if grounder is None:
grounder = Grounder()
print("Instantiating bio ontology...")
bio_ontology.initialize()
if equivalences is None:
equivalences = {}
available_namespaces = set()
for terms in grounder.entries.values():
for term in terms:
available_namespaces.add(term.db)
self.grounder = grounder
self.equivalences = equivalences
self.available_namespaces = list(available_namespaces)
self.paper_level_grounding = defaultdict(set)
self.processed_data = self._process_annotations_table()
if os.path.exists(TAXONOMY_CACHE_PATH):
with open(TAXONOMY_CACHE_PATH, 'r') as fh:
self.taxonomy_cache = json.load(fh)
else:
self.taxonomy_cache = {}
print('Taxonomy cache length: %s' % len(self.taxonomy_cache))
def get_mappings_tables(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Get table showing how goldstandard groundings are being mapped
Namespaces used in the Bioc dataset may only partially overlap with
those used by Gilda. Users may pass in a dictionary of equivalences
mapping groundings used in the Bioc dataset to Gilda's namespaces.
        This method generates tables showing how groundings used in the
dataset project onto Gilda's namespaces through these equivalences.
Returns
-------
        mapping_table : :py:class:`pandas.DataFrame`
Rows correspond to namespaces used in the Bioc dataset, columns
to namespaces used in Gilda (automatically populated based on a
            Gilda Grounder's entries attribute). There is also a Total row
containing the sum of values for all other rows. There are columns
Count, and Total Mapped, showing the total count of entries for
each row namespace, and the total count of entries that could be
mapped to a Gilda namespace respectively.
The same row namespace can be mapped to multiple column namespaces,
            causing values in the Total Mapped column to be less than the sum of
values of other columns in the same row. Additionally, in some cases
an entry in the Bioc dataset has multiple curated groundings, causing
the counts not to add up to the number of entries in the dataset.
        mapping_table_unique : :py:class:`pandas.DataFrame`
Similar to mapping table, but counts are given for unique named
entity groundings, ignoring duplication of groundings between rows
in the Bioc dataset.
"""
# Build dataframes for storing information. Values will be filled in
# by looping through rows of the dataset.
index = [get_display_name(ns) for ns in bioc_nmspaces] + ['Total']
columns = (['Count'] +
[get_display_name(ns) for ns in self.available_namespaces] +
['Total Mapped'])
mapping_table = pd.DataFrame(index=index, columns=columns)
mapping_table.fillna(0, inplace=True)
mapping_table_unique = pd.DataFrame(index=index, columns=columns)
mapping_table_unique.fillna(0, inplace=True)
# Maps row namespaces to sets of associated grounding ids
nmspace_ids = defaultdict(set)
        # Maps row namespaces to the set of Gilda grounding ids that have
# been mapped to from them
mapped_to_nmspace_ids = defaultdict(set)
# Maps row namespaces to sets of associated grounding ids, but
# only in cases where some mapping exists to a Gilda grounding
mapped_from_nmspace_ids = defaultdict(set)
# Looping through dataframe is costly. There may be a way to write
# this with a clever series of groupbys
for _, row in self.processed_data.iterrows():
# For each row loop through goldstandard groundings. There can
# be more than one
for g1 in row.obj:
# Get the namespace. If it is not one of the namespaces used
# in evaluation, discard and continue to the next iteration
# of the loop
nmspace1 = g1.split(':', maxsplit=1)[0]
if nmspace1 not in bioc_nmspaces:
continue
# Increment total count for this namespace
mapping_table.loc[get_display_name(nmspace1), 'Count'] += 1
# If this particular grounding has not been seen before for
# this namespace increment unique count and mark grounding
# as having been seen
if g1 not in nmspace_ids[nmspace1]:
mapping_table_unique.loc[get_display_name(nmspace1),
'Count'] += 1
nmspace_ids[nmspace1].add(g1)
# Get all of the synonyms that grounding can be mapped to.
# This includes the grounding itself. If a row namespace is
# also a column namespace, we consider this to be a valid
# mapping
synonyms = self.get_synonym_set([g1])
# Track which namespaces have been used so we don't overcount
# when the same grounding can be mapped to multiple groundings
# in the same namespace
used_namespaces = set()
for g2 in synonyms:
nmspace2 = g2.split(':', maxsplit=1)[0]
# If a namespace mapped to is not available in Gilda
# or if we have already tallied a mapping to this namespace
# for this particular row, discard and continue
if nmspace2 not in self.available_namespaces or \
nmspace2 in used_namespaces:
continue
                    # If a Gilda namespace has not been mapped to in the current
# row increment the count of entries in the namespace with
# a mapping to a Gilda namespace
if not used_namespaces:
mapping_table.loc[get_display_name(nmspace1),
'Total Mapped'] += 1
used_namespaces.add(nmspace2)
# If the grounding g1 has never been mapped to a Gilda
# namespace increment the unique count
if g1 not in mapped_from_nmspace_ids[nmspace1]:
mapping_table_unique. \
loc[get_display_name(nmspace1),
'Total Mapped'] += 1
mapped_from_nmspace_ids[nmspace1].add(g1)
# Increment count for mapping of row namespace to
# column namespace
mapping_table.loc[get_display_name(nmspace1),
get_display_name(nmspace2)] += 1
# If the grounding in column namespace has not been mapped
# to by the grounding in row namespace, increment unique
# count
if g2 not in mapped_to_nmspace_ids[nmspace1]:
mapping_table_unique. \
loc[get_display_name(nmspace1),
get_display_name(nmspace2)] += 1
mapped_to_nmspace_ids[nmspace1].add(g2)
# Generate total rows
mapping_table.loc['Total', :] = mapping_table.sum()
mapping_table_unique.loc['Total', :] = mapping_table_unique.sum()
mapping_table.reset_index(inplace=True)
        mapping_table.rename(columns={'index': 'Namespace'}, inplace=True)
mapping_table_unique.reset_index(inplace=True)
        mapping_table_unique.rename(columns={'index': 'Namespace'}, inplace=True)
return mapping_table, mapping_table_unique
def _process_annotations_table(self):
"""Extract relevant information from annotations table."""
print("Extracting information from annotations table...")
df = MODULE.ensure_tar_df(
url=URL,
inner_path='BioIDtraining_2/annotations.csv',
read_csv_kwargs=dict(sep=',', low_memory=False),
)
# Split entries with multiple groundings then normalize ids
df.loc[:, 'obj'] = df['obj'].apply(self._normalize_ids)
# Add synonyms of gold standard groundings to help match more things
df.loc[:, 'obj_synonyms'] = df['obj'].apply(self.get_synonym_set)
# Create column for entity type
df.loc[:, 'entity_type'] = df.apply(self._get_entity_type_helper, axis=1)
processed_data = df[['text', 'obj', 'obj_synonyms', 'entity_type',
'don_article']]
print("%d rows in processed annotations table." % len(processed_data))
processed_data = processed_data[processed_data.entity_type
!= 'unknown']
print("%d rows in annotations table without unknowns." %
len(processed_data))
for don_article, text, synonyms in df[['don_article', 'text',
'obj_synonyms']].values:
self.paper_level_grounding[don_article, text].update(synonyms)
return processed_data
def _get_entity_type_helper(self, row) -> str:
if self._get_entity_type(row.obj) != 'Gene':
return self._get_entity_type(row.obj)
elif any(y.startswith('HGNC') for y in row.obj_synonyms):
return 'Human Gene'
else:
return 'Nonhuman Gene'
def ground_entities_with_gilda(self, context=True):
"""Compute gilda groundings of entity texts in corpus
Adds two columns to the internal dataframe for groundings with
and without context based disambiguation.
"""
df = self.processed_data
tqdm.write("Grounding no-context corpus with Gilda...")
df.loc[:, 'groundings_no_context'] = df.text. \
progress_apply(self._get_grounding_list)
if context:
tqdm.write("Grounding with-context corpus with Gilda...")
# use from tqdm.contrib.concurrent import thread_map
df.loc[:, 'groundings'] = df. \
progress_apply(self._get_row_grounding_list, axis=1)
else:
tqdm.write("Skipping grounding with context.")
df.loc[:, 'groundings'] = df.groundings_no_context
tqdm.write("Finished grounding corpus with Gilda...")
self._evaluate_gilda_performance()
def _get_row_grounding_list(self, row):
return self._get_grounding_list(
row.text,
context=self._get_plaintext(row.don_article),
organisms=self._get_organism_priority(row.don_article),
)
@lru_cache(maxsize=None)
def _get_plaintext(self, don_article: str) -> str:
"""Get plaintext content from XML file in BioID corpus
Parameters
----------
don_article :
Identifier for paper used within corpus.
Returns
-------
:
Plaintext of specified article
"""
directory = MODULE.ensure_untar(url=URL, directory='BioIDtraining_2')
path = directory.joinpath('BioIDtraining_2', 'fulltext_bioc',
f'{don_article}.xml')
tree = etree.parse(path.as_posix())
paragraphs = tree.xpath('//text')
paragraphs = [' '.join(text.itertext()) for text in paragraphs]
        return '\n'.join(paragraphs) + '\n'
def _get_organism_priority(self, don_article):
don_article | |
"""
A module designed to analyze FOVs of in vivo calcium
activity. This module's main class, :class:`CalciumAnalysisOverTime`,
is used to run the analysis pipeline over all FOVs recorded in an experiment.
"""
from enum import Enum
from pathlib import Path
from collections import defaultdict
import itertools
from typing import Tuple, List, Optional, Dict
import pandas as pd
import xarray as xr
import numpy as np
import attr
from attr.validators import instance_of
from calcium_bflow_analysis.fluo_metadata import FluoMetadata
from calcium_bflow_analysis.analog_trace import AnalogAcquisitionType
from calcium_bflow_analysis.single_fov_analysis import SingleFovParser
class Epoch(Enum):
"""
All possible TAC epoch combinations
"""
ALL = "all"
RUN = "run"
STAND = "stand"
STIM = "stim"
JUXTA = "juxta"
SPONT = "spont"
RUN_STIM = "run_stim"
RUN_JUXTA = "run_juxta"
RUN_SPONT = "run_spont"
STAND_STIM = "stand_stim"
STAND_JUXTA = "stand_juxta"
STAND_SPONT = "stand_spont"
class FormatFinder:
"""A generic class to find files in a folder with a given glob string.
This class can be instantiated once per file format and then passed to the
FileFinder object.
"""
def __init__(self, name: str, glob: str) -> None:
self.name = name
self.glob = glob
def find_file(self, folder: Path, filename: str) -> Optional[Path]:
"""Main method designed to check whether this file exists in the given
path. It will use the glob string to recursively check for files in
the given directory.
"""
try:
fname = next(folder.rglob(filename + self.glob))
except StopIteration:
return None
else:
return fname
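# Illustrative pairing of a format name with its glob (the glob string here is an
# assumption, not a project default):
#     caiman_results = FormatFinder(name="caiman", glob="*results.npz")
#     caiman_results.find_file(Path("/data/mouse1"), "fov1_run3")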
@attr.s(slots=True)
class FileFinder:
"""
A class designated to find all corresponding files
for a given FOV. This means that each tif file that represents
a recorded field of view should always have a corresponding
.npz file containing the results from the calcium analysis
pipeline, and if analog data was recorded then this FOV also
has a .txt file to go along with it. It may have other accompanying
data files as well. This class is aimed at finding these "siblings"
and returning them to other classes for further processing.
"""
results_folder = attr.ib(validator=instance_of(Path))
file_formats = attr.ib(validator=instance_of(list))
folder_globs = attr.ib(default={Path("."): "*.tif"}, validator=instance_of(dict))
data_files = attr.ib(init=False)
def find_files(self) -> Optional[pd.DataFrame]:
"""
Main entrance to pipeline of class. Returns a DataFrame in which
each row is a doublet\\triplet of corresponding files.
"""
all_found_files = self._find_all_relevant_files()
if all_found_files:
self.data_files = self._make_table(all_found_files)
return self.data_files
return None
def _find_all_relevant_files(self) -> Optional[Dict[str, List[Path]]]:
"""
Passes each .tif file it finds (with the given glob string)
and looks for its results, analog and colabeled friends.
If it can't find the friends it skips this file, else it adds
        them into a list. None is returned if this experiment had no
        colabeling or analog data associated with it.
"""
format_names = list(fmt.name for fmt in self.file_formats) + ['tif']
all_found_files = {fmt: [] for fmt in format_names}
siblings = {fmt: Path() for fmt in format_names}
if len(all_found_files) == 0:
return
for folder, globstr in self.folder_globs.items():
for file in folder.rglob(globstr):
fname = file.stem
already_analyzed = self._assert_file_wasnt_analyzed(folder, fname)
if already_analyzed:
continue
for fileformat in self.file_formats:
found_file = fileformat.find_file(folder, fname)
if found_file:
siblings[fileformat.name] = found_file
else:
break
else: # No break occurred - we found all needed files
siblings['tif'] = file
[
all_found_files[name].append(found_file)
for name, found_file in siblings.items()
]
return all_found_files
@staticmethod
def _assert_file_wasnt_analyzed(folder, fname) -> bool:
try:
next(folder.rglob(f"{fname}.nc"))
except StopIteration:
return False
else:
print(f"File {fname} was already analyzed")
return True
@staticmethod
def _make_table(all_found_files: Dict[str, List[Path]]) -> pd.DataFrame:
"""
Turns list of pathlib.Path objects into a DataFrame.
"""
columns = all_found_files.keys()
data_files = pd.DataFrame([], columns=columns)
files_iter = zip(*all_found_files.values())
for idx, files_tup in enumerate(files_iter):
cur_row = pd.DataFrame([files_tup], columns=columns, index=[idx])
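            # Note: DataFrame.append was deprecated in pandas 1.4 and removed in
            # 2.0; on newer pandas the same table can be built by collecting the
            # rows and calling pd.concat once.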
data_files = data_files.append(cur_row)
return data_files
@attr.s(slots=True, hash=True)
class CalciumAnalysisOverTime:
""" Analysis class that parses the output of CaImAn "results.npz" files.
Usage: run the "run_batch_of_timepoints" method, which will go over all FOVs
that were recorded in this experiment.
"folder_globs" is a dictionary of folder name and glob strings, which allows
you to analyze several directories of data, each with its own glob pattern.
If serialize is True, it will write to disk each FOV's DataArray, as well
as the concatenated DataArray to make future processing faster.
If you've already serialized your data, use "generate_ds_per_day" to continue
the downstream analysis of your files by concatenating all relevant files into
one large database which can be analyzed with downstream scripts that may be
found in "calcium_trace_analysis.py".
"""
files_table = attr.ib(validator=instance_of(pd.DataFrame))
serialize = attr.ib(default=False, validator=instance_of(bool))
folder_globs = attr.ib(default={Path("."): "*.tif"}, validator=instance_of(dict))
analog = attr.ib(
default=AnalogAcquisitionType.NONE, validator=instance_of(AnalogAcquisitionType)
)
regex = attr.ib(default=attr.Factory(dict), validator=instance_of(dict))
list_of_fovs = attr.ib(init=False)
concat = attr.ib(init=False)
def run_batch_of_timepoints(self, results_folder):
"""
Main method to analyze all FOVs in all timepoints in all experiments.
Generally used for TAC experiments, which have multiple FOVs per mouse, and
an experiment design which spans multiple days.
        The script expects a filename containing the following fields:
* Mouse ID (digits at the beginning of filename)
* Either 'HYPER' or 'HYPO'
* 'DAY_0/1/n'
* 'FOV_n'
After creating a xr.Dataset out of each file, the script will write this DataArray to
disk (only if it doesn't exist yet, and only if self.serialize is True) to make future processing faster.
Finally, it will take all created DataArrays and concatenate them into a single DataArray,
that can also be written to disk using the "serialize" attribute.
The `**regex` kwargs-like parameter is used to manually set the regex
that will parse the metadata from the file name. The default regexes are
described above. Valid keys are "id_reg", "fov_reg", "cond_reg" and "day_reg".
"""
# Multiprocessing doesn't work due to the fact that not all objects are
# pickleable
# with mp.Pool() as pool:
# self.list_of_fovs = pool.map(
# self._mp_process_timepoints, self.files_table.itertuples(index=False)
# )
self.list_of_fovs = []
for row in self.files_table.itertuples():
self.list_of_fovs.append(self._mp_process_timepoints(row))
self.generate_ds_per_day(results_folder)
def _mp_process_timepoints(self, files_row: Tuple):
"""
A function for a single process that takes three conjugated files - i.e.
three files that belong to the same recording, and processes them.
"""
print(f"Parsing {files_row.tif}")
fov = self._analyze_single_fov(files_row, analog=self.analog, **self.regex)
return str(fov.metadata.fname)[:-4] + ".nc"
def _analyze_single_fov(
self, files_row, analog=AnalogAcquisitionType.NONE, **regex
):
""" Helper function to go file by file, each with its own fluorescence and
possibly analog data, and run the single FOV parsing on it """
meta = FluoMetadata(files_row.tif, num_of_channels=2, **regex)
meta.get_metadata()
fov = SingleFovParser(
analog_fname=files_row.analog,
results_fname=files_row.caiman,
colabeled=files_row.colabeled if "colabeled" in files_row._fields else False,
results_hdf5=files_row.hdf5,
metadata=meta,
analog=analog,
summarize_in_plot=True,
)
fov.parse()
if self.serialize:
fov.add_metadata_and_serialize()
return fov
def generate_ds_per_day(
self, results_folder: Path, globstr="*FOV*.nc", recursive=True
):
"""
Parse .nc files that were generated from the previous analysis
and chain all "DAY_X" Datasets together into a single list.
This list is then concatenated in to a single DataArray, creating a
large data structure for each experimental day.
If we arrived here from "run_batch_of_timepoints()", the data is already
present in self.list_of_fovs. Otherwise, we have to manually find the
files using a default glob string that runs on each folder in
self.folder_globs.
Saves all day-data into self.results_folder.
"""
fovs_by_day = defaultdict(list)
try: # coming from run_batch_of_timepoints()
all_files = self.list_of_fovs
except AttributeError:
if recursive:
all_files = [folder.rglob(globstr) for folder in self.folder_globs]
else:
all_files = [folder.glob(globstr) for folder in self.folder_globs]
all_files = itertools.chain(*all_files)
for file in all_files:
if (
"NEW_crystal_skull_TAC_161018" in str(file)
or "crystal_skull_TAC_180719" in str(file)
or "602_HYPER_HYPO_DAY_0_AND_ALL" in str(file)
):
continue
print(file)
try:
day = int(xr.open_dataset(file).day)
except AttributeError: # older datasets
continue
except FileNotFoundError: # no calcium in FOV
continue
fovs_by_day[day].append(file)
self._concat_fovs(fovs_by_day, results_folder)
def _concat_fovs(self, fovs_by_day: dict, results_folder: Path):
"""
Take the list of FOVs and turn them into a single DataArray. Lastly it will
write this DataArray to disk.
fovs_by_day: Dictionary with its keys being the days of experiment (0, 1, ...) and
values as a list of filenames.
"""
print("Concatenating all FOVs...")
fname_to_save = "data_of_day_"
for day, file_list in fovs_by_day.items():
try:
file = next(results_folder.glob(fname_to_save + str(day) + ".nc"))
print(f"Found {str(file)}, not concatenating")
except StopIteration: # .nc file doesn't exist
print(f"Concatenating day {day}")
data_per_day = []
for file in file_list:
try:
data_per_day.append(xr.open_dataset(file).load())
except FileNotFoundError:
pass
concat = xr.concat(data_per_day, dim="fname")
# Fix datatype conversion of epoch_times to floats:
asbool = concat["epoch_times"].data.astype(np.bool)
nans = np.where(np.isnan(concat["epoch_times"].data))
asbool[nans] = False
concat["epoch_times"] = (["fname", "epoch", "time"], asbool)
self.concat = concat
concat.to_netcdf(
str(results_folder / f"{fname_to_save + str(day)}.nc"), mode="w",
)
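# A minimal end-to-end usage sketch. The folder path and glob strings are
# assumptions; the format names ('caiman', 'analog', 'hdf5') match the column
# names that _analyze_single_fov expects on each table row.
if __name__ == "__main__":
    base_folder = Path("/data/experiment")  # hypothetical data folder
    formats = [
        FormatFinder(name="caiman", glob="*results.npz"),
        FormatFinder(name="analog", glob="*analog.txt"),
        FormatFinder(name="hdf5", glob="*results.hdf5"),
    ]
    finder = FileFinder(
        results_folder=base_folder,
        file_formats=formats,
        folder_globs={base_folder: "*.tif"},
    )
    files_table = finder.find_files()
    if files_table is not None:
        analysis = CalciumAnalysisOverTime(
            files_table=files_table,
            serialize=True,
            folder_globs={base_folder: "*.tif"},
            analog=AnalogAcquisitionType.NONE,
        )
        analysis.run_batch_of_timepoints(base_folder / "results")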
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_float_with_na(self):
in_data = self.decimal_floats + [None]
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_complex_no_na(self):
in_data = self.decimal_floats
out_data = [complex(f, 0) for f in self.decimal_floats]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_complex_with_na(self):
in_data = self.decimal_floats + [None]
out_data = [complex(f, 0) for f in self.decimal_floats] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_string_no_na(self):
in_data = self.decimal_floats
out_data = [str(f) for f in self.decimal_floats]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_string_with_na(self):
in_data = self.decimal_floats + [None]
out_data = [str(f) for f in self.decimal_floats] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_generic_float_to_boolean_no_na(self):
in_data = self.decimal_floats
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_generic_float_to_boolean_with_na(self):
in_data = self.decimal_floats + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_float_bool_flag_to_boolean_no_na(self):
in_data = self.bool_flags
out_data = [bool(f) for f in self.bool_flags]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_float_bool_flag_to_boolean_with_na(self):
in_data = self.bool_flags + [None]
out_data = [bool(f) for f in self.bool_flags] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_decimal_float_between_0_and_1_to_boolean_no_na(self):
in_data = self.decimal_floats_between_0_and_1
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_decimal_float_between_0_and_1_to_boolean_with_na(self):
in_data = self.decimal_floats_between_0_and_1 + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_float_to_datetime_no_na(self):
in_data = self.decimal_floats
out_data = [datetime.fromtimestamp(f, tz=timezone.utc)
for f in self.decimal_floats]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_datetime_with_na(self):
in_data = self.decimal_floats + [None]
out_data = [datetime.fromtimestamp(f, tz=timezone.utc)
for f in self.decimal_floats] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_timedelta_no_na(self):
in_data = self.decimal_floats
out_data = [timedelta(seconds=f) for f in self.decimal_floats]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_timedelta_with_na(self):
in_data = self.decimal_floats + [None]
out_data = [timedelta(seconds=f) for f in self.decimal_floats] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_object_no_na(self):
in_series = pd.Series(self.decimal_floats)
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
def test_coerce_from_float_to_object_with_na(self):
in_series = pd.Series(self.decimal_floats + [None])
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
class CoerceComplexDtypeTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
random.seed(12345)
size = 3
cls.real_whole_complex = [complex(-1 * size // 2 + i + 1.0, 0)
for i in range(size)]
# ^ = [..., complex(-1, 0), complex(0, 0), complex(1, 0), ...]
cls.real_complex = [complex(-1 * size // 2 + i + 1 + random.random(), 0)
for i in range(size)]
# ^ = [..., complex(-1+e, 0), complex(0+e, 0), complex(1+e, 0), ...]
cls.real_complex_between_0_and_1 = [complex(random.random(), 0)
for _ in range(size)]
# ^ = [complex(0.xxxx, 0), complex(0.xxxx, 0), complex(0.xxxx, 0), ...]
cls.imag_complex = [complex(-1 * size // 2 + i + 1 + random.random(),
-1 * size // 2 + i + 1 + random.random())
for i in range(size)]
# ^ = [..., complex(-1+e,-1+e), complex(0+e,0+e), complex(1+e,1+e), ...]
cls.bool_flags = [complex((i + 1) % 2, 0) for i in range(size)]
# ^ = [complex(1, 0), complex(0, 0), complex(1, 0), complex(0, 0), ...]
cls.col_name = "complex"
def test_coerce_from_real_whole_complex_to_integer_no_na(self):
in_data = self.real_whole_complex
out_data = [int(c.real) for c in self.real_whole_complex]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_real_whole_complex_to_integer_with_na(self):
in_data = self.real_whole_complex + [None]
out_data = [int(c.real) for c in self.real_whole_complex] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_real_decimal_complex_to_integer_no_na(self):
in_data = self.real_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_decimal_complex_to_integer_with_na(self):
in_data = self.real_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_integer_no_na(self):
in_data = self.imag_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to | |
import matplotlib
from maskgen.maskgen_loader import MaskGenLoader
from maskgen.ui.semantic_frame import SemanticFrame
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use("TkAgg")
import logging
from matplotlib.figure import Figure
from Tkinter import *
import matplotlib.patches as mpatches
import ttk
import tkMessageBox
from PIL import ImageTk
from maskgen.support import getValue
from maskgen.tool_set import imageResizeRelative, openImage,get_username, GrayBlockOverlayGenerator, compose_overlay_name
import os
import numpy as np
import maskgen.qa_logic
from maskgen.video_tools import get_end_time_from_segment
import maskgen.tool_set
import random
import maskgen.scenario_model
from maskgen.services.probes import ProbeGenerator, DetermineTaskDesignation, fetch_qaData_designation, cleanup_temporary_files
import maskgen.validation
from maskgen.tool_set import openFile
import webbrowser
from maskgen.graph_meta_tools import MetaDataExtractor
class Chkbox:
def __init__(self, parent, dialog, label=None, command=None, value=False):
self.value = BooleanVar(value=value)
self.box = Checkbutton(parent, variable=self.value, command=dialog.check_ok if command is None else command)
self.label = label
def __nonzero__(self):
return self.value.get()
def set_value(self, value):
self.value.set(value=value)
def grid_info(self):
return self.box.grid_info()
def grid(self):
self.label.grid()
self.box.grid()
def grid_remove(self):
self.box.grid_remove()
self.label.grid_remove()
class CheckboxGroup:
"""
boxes: list of wrapped Checkboxes
condition: either 'all'- all checkboxes in the group must be true or 'any'- any true value will return true.
"""
    def __init__(self, boxes=None, condition='all'):
        # Avoid sharing a mutable default list across instances.
        self.boxes = boxes if boxes is not None else []
        self.condition = condition
def __nonzero__(self):
if len(self.boxes) == 0:
return True
if self.condition == 'any':
return any(bool(value) for value in self.boxes)
else:
return all(bool(value) for value in self.boxes)
def hide_group(self):
for ck in self.boxes:
ck.grid_remove()
def show_group(self):
for ck in self.boxes:
ck.grid()
def grid_info(self, index = -1):
"""
Get the grid_info of the checkbox at the index. default is last index
:return:
"""
return self.boxes[index].grid_info() if len(self.boxes) > 0 else {}
class MannyPage(Frame):
"""
Displays mascot with instructions and status information on probe and QA page generation.
"""
checkboxes = CheckboxGroup()
manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]
def __init__(self, master):
Frame.__init__(self, master)
self.statusLabelText = StringVar()
self.statusLabelText.set('Probes Generating')
self.heading = Label(self, text="Welcome to the QA Wizard. Press Next to begin the QA Process or Quit to stop. This is "
"Manny; He is here to help you analyze the journal. The tool is currently generating the probes. "
"This could take a while. When the next button is enabled you may begin.",
wraplength=400)
self.heading.grid(column=0, row=0, rowspan=2, columnspan=2)
manny_color = maskgen.tool_set.get_icon('Manny_icon_color.jpg')
manny_mask = maskgen.tool_set.get_icon('Manny_icon_mask.jpg')
self.mannyFrame = Frame(self)
self.mannyFrame.grid(column=0, row=2, columnspan=2)
self.canvas = Canvas(self.mannyFrame, width=510, height=510)
self.canvas.pack()
manny_img = openImage(manny_color)
manny_img_mask = openImage(manny_mask).to_mask()
manny_img_mask = imageResizeRelative(manny_img_mask, (500, 500), manny_img_mask.size)
self.manny = ImageTk.PhotoImage(
imageResizeRelative(manny_img, (500, 500), manny_img.size).overlay(manny_img_mask,self.manny_colors[
random.randint(0, len(self.manny_colors) - 1)]).toPIL())
self.image_on_canvas = self.canvas.create_image(510 / 2, 510 / 2, image=self.manny, anchor=CENTER, tag='things')
self.statusLabelObject = Label(self, textvariable=self.statusLabelText)
self.statusLabelObject.grid(column=0, row=3, columnspan=2, sticky=E + W)
self.canvas.bind("<Double-Button-1>", master.help)
self.wquit = Button(self, text='Quit', command=master.exitProgram, width=20)
self.wquit.grid(column=0, row=4, sticky=W, padx=5, pady=5)
self.wnext = Button(self, text='Next', command=master.nex, state=DISABLED, width=20)
self.wnext.grid(column=1, row=4, sticky=E, padx=5, pady=5)
class FinalPage(Frame):
"""
Final QA page, handles comments, final approval.
"""
def __init__(self, master):
Frame.__init__(self, master)
row = 0
col = 0
self.infolabel = Label(self, justify=LEFT, text='QA Checklist:').grid(row=row, column=col)
row += 1
qa_list = [
'Base and terminal node images should be the same format. -If the base was a JPEG, the Create JPEG/TIFF option should be used as the last step.',
'All relevant semantic groups are identified.']
self.checkboxes = CheckboxGroup(boxes=[])
for q in qa_list:
box_label = Label(self, text=q, wraplength=600, justify=LEFT)
ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_state())
ck.box.grid(row=row, column=col)
ck.label.grid(row=row, column=col + 1, sticky='W')
self.checkboxes.boxes.append(ck)
row += 1
master.checkboxes[master.current_qa_page] = self.checkboxes
if len(self.master.errors) > 1:
Label(self, text='Probes were generated with errors. They can be reviewed, but QA cannot be accepted. Check the log for errors.').grid(row=row, column=col+1)
row += 1
Label(self, text='QA Signoff: ').grid(row=row, column=col)
col += 1
self.reporterStr = StringVar()
self.reporterStr.set(get_username())
self.reporterEntry = Entry(self, textvar=self.reporterStr)
self.reporterEntry.grid(row=row, column=col, columnspan=3, sticky='W')
row += 2
col -= 1
self.acceptButton = Button(self, text='Accept', command=lambda: master.qa_done('yes'), width=15,
state=DISABLED)
self.acceptButton.grid(row=row, column=col + 2, columnspan=2, sticky='W')
self.rejectButton = Button(self, text='Reject', command=lambda: master.qa_done('no'), width=15)
self.rejectButton.grid(row=row, column=col + 1, columnspan=1, sticky='E')
self.previButton = Button(self, text='Previous', command=master.pre, width=15)
self.previButton.grid(row=row, column=col, columnspan=2, sticky='W')
row += 1
self.commentsLabel = Label(self, text='Comments: ')
self.commentsLabel.grid(row=row, column=col, columnspan=3)
row += 1
textscroll = Scrollbar(self)
textscroll.grid(row=row, column=col + 4, sticky=NS)
self.commentsBox = Text(self, height=5, width=100, yscrollcommand=textscroll.set, relief=SUNKEN)
self.commentsBox.grid(row=row, column=col, padx=5, pady=5, columnspan=3, sticky=NSEW)
textscroll.config(command=self.commentsBox.yview)
currentComment = master.parent.scModel.getProjectData('qacomment')
        if currentComment is not None:
            self.commentsBox.insert(END, currentComment)
class QAPage(Frame):
"""
A standard QA Page, allows review and user validation of probe spatial, temporal aspects
"""
#TODO: Refactor to put page data with the page.
"""
subplots = []
pltdata = []
successIcon = None
displays = []
pathboxes = []
"""
def __init__(self, master, link):
Frame.__init__(self, master=master)
self.master = master
self.link = link
self.checkboxes = CheckboxGroup(boxes=[])
#Find this probe- could probably do this elsewhere and pass it in.
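        # The link string encodes the probe pair: 'op_node<-donor_node' for donor
        # relationships and 'op_node->final_node' otherwise; split on the arrow to
        # recover the two endpoints.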
self.edgeTuple = tuple(link.split("<-"))
if len(self.edgeTuple) < 2:
self.finalNodeName = link.split("->")[1]
self.edgeTuple = tuple(link.split("->"))
else:
self.finalNodeName = None
if (len(link.split('->'))>1):
probe = [probe for probe in master.probes if
probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.finalNodeId in master.lookup[self.edgeTuple[1]]][0]
else:
probe = \
[probe for probe in master.probes if
probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.donorBaseNodeId in
master.lookup[
self.edgeTuple[1]]][0]
self.probe = probe
iFrame = Frame(self)
c = Canvas(iFrame, width=35, height=35)
c.pack()
#Success Icon
img = openImage(maskgen.tool_set.get_icon('RedX.png') if probe.failure else maskgen.tool_set.get_icon('check.png'))
self.successIcon = ImageTk.PhotoImage(imageResizeRelative(img, (30, 30), img.size).toPIL())
c.create_image(15, 15, image=self.successIcon, anchor=CENTER, tag='things')
#Layout
row = 0
col = 0
self.optionsLabel = Label(self, text=self.link, font=(None, 10))
self.optionsLabel.grid(row=row, columnspan=3, sticky='EW', padx=(40, 0), pady=10)
iFrame.grid(column=0, row=0, columnspan=1, sticky=W)
row += 1
self.operationVar = StringVar(value="Operation [ Semantic Groups ]:")
self.operationLabel = Label(self, textvariable=self.operationVar, justify=LEFT)
self.semanticFrame = SemanticFrame(self)
self.semanticFrame.grid(row=row + 1, column=0, columnspan=2, sticky=N + W, rowspan=1, pady=10)
row += 2
#cImageFrame is used for plot, image and overlay
self.cImgFrame = ttk.Notebook(self)
self.cImgFrame.bind('<<NotebookTabChanged>>', lambda a: self.frameMove())
self.cImgFrame.grid(row=row, rowspan=8)
self.descriptionVar = StringVar()
self.descriptionLabel = Label(self, textvariable=self.operationVar, justify=LEFT)
row += 8
self.operationLabel.grid(row=row, columnspan=3, sticky='W', padx=10)
row += 1
textscroll = Scrollbar(self)
textscroll.grid(row=row, column=col + 1, sticky=NS)
self.commentBox = Text(self, height=5, width=80, yscrollcommand=textscroll.set, relief=SUNKEN)
self.master.commentsBoxes[self.link] = self.commentBox
self.commentBox.grid(row=row, column=col, padx=5, pady=5, columnspan=1, rowspan=2, sticky=NSEW)
textscroll.config(command=self.commentBox.yview)
col = 3
row = 0
scroll = Scrollbar(self)
scroll.grid(row=row, column=col + 2, rowspan=5, columnspan=1, sticky=NS)
self.pathList = Listbox(self, width=30, yscrollcommand=scroll.set, selectmode=EXTENDED, exportselection=0)
self.pathList.grid(row=row, column=col - 1, rowspan=5, columnspan=3, padx=(30, 10), pady=(20, 20))
self.master.pathboxes[self] = self.semanticFrame.getListbox()
scroll.config(command=self.pathList.yview)
self.transitionVar = StringVar()
edge = master.scModel.getGraph().get_edge(probe.edgeId[0], probe.edgeId[1])
self.operationVar.set(self.operationVar.get() + master._compose_label(edge))
master.edges[self] = [edge, self.semanticFrame.getListbox()]
for sg in edge['semanticGroups'] if 'semanticGroups' in edge else []:
self.semanticFrame.insertListbox(ANCHOR, sg)
operation = master.scModel.getGroupOperationLoader().getOperationWithGroups(edge['op'])
#QA checkboxes
if operation.qaList is not None:
args = getValue(edge, 'arguments', {})
self.curOpList = [x for x in operation.qaList]
for item_pos in range(len(self.curOpList)):
item = self.curOpList[item_pos]
try:
self.curOpList[item_pos] = item.format(**args)
except:
pass
else:
self.curOpList = []
row += 5
if self.curOpList is None:
master.qaData.set_qalink_status(self.link, 'yes')
for q in self.curOpList:
box_label = Label(self, text=q, wraplength=250, justify=LEFT)
ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_qalink_status(link=link))
ck.box.grid(row=row, column=col - 1)
ck.label.grid(row=row, column=col, columnspan=4, sticky='W')
self.checkboxes.boxes.append(ck)
row += 1
master.checkboxes[self] = self.checkboxes
# Main Features- load the overlay for images, load plot graph & overlay page for videos
if ('<-' in self.link and probe.donorVideoSegments is None) or probe.targetVideoSegments is None:
self.load_overlay(initialize=True)
else:
self.transitionString(None)
self.setUpFrames()
#Comment section
currentComment = master.qaData.get_qalink_caption(self.link)
self.commentBox.delete(1.0, END)
self.commentBox.insert(END, currentComment if currentComment is not None else '')
#Navigation Buttons
self.acceptButton = Button(self, text='Next', command=master.nex, width=15)
self.acceptButton.grid(row=12, column=col + 2, columnspan=2, sticky='E', padx=(20, 20))
self.prevButton = Button(self, text='Previous', command=master.pre, width=15)
self.prevButton.grid(row=12, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))
self.acceptnButton = Button(self, text='Next Unchecked', command=master.nexCheck, width=15)
self.acceptnButton.grid(row=13, column=col + 2, columnspan=2, sticky='E', padx=(20, 20))
self.prevnButton = Button(self, text='Previous Unchecked', command=master.preCheck, width=15)
self.prevnButton.grid(row=13, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))
row = 14
#Progress Bar
pb = ttk.Progressbar(self, orient='horizontal', mode='determinate', maximum=100.0001)
pb.grid(row=row, column=0, sticky=EW, columnspan=8)
pb.step(master.progress * 100)
master.progressBars.append(pb)
def setUpFrames(self):
"""
Lays out inner display for video temporal and spatial review
:return:
"""
displays = [TemporalReviewDisplay(self)]
        if any(segment.filename is not None for segment in self.probe.targetVideoSegments):
displays.append(SpatialReviewDisplay(self))
self.checkboxes.boxes.append(CheckboxGroup(boxes=[d.checkbox for d in displays], condition='any'))
self.master.pageDisplays[self] = [0, displays]
def _add_to_listBox(self, box, string):
if len(string) < 20:
box.insert(END, string)
return 1
box.insert(END, string[0:15]+"...")
box.insert(END, " " + string[max(15-int(len(string)),-10):])
return 2
def transitionString(self, probeList):
tab = " "
current = 0
c = 0
        if self.finalNodeName is None:
self._add_to_listBox(self.pathList, self.edgeTuple[1])
self.pathList.insert(END, 2*tab + "|")
self.pathList.insert(END, tab + "Donor")
self.pathList.insert(END, 2*tab + "|")
self.pathList.insert(END, 2*tab | |
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import mock
import os
import sys
import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting
reload(setting)
from base import BaseTest
from compass.db.api import adapter as adapter_api
from compass.db.api import adapter_holder as adapter
from compass.db.api import cluster
from compass.db.api import database
from compass.db.api import host
from compass.db.api import machine
from compass.db.api import metadata as metadata_api
from compass.db.api import metadata_holder as metadata
from compass.db.api import network
from compass.db.api import switch
from compass.db.api import user as user_api
from compass.db import exception
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import util
class ClusterTestCase(unittest2.TestCase):
"""Cluster base test case."""
def setUp(self):
super(ClusterTestCase, self).setUp()
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data'
)
reload(setting)
database.init('sqlite://')
database.create_db()
adapter.load_adapters(force_reload=True)
metadata.load_metadatas(force_reload=True)
adapter.load_flavors(force_reload=True)
self.user_object = (
user_api.get_user_object(
setting.COMPASS_ADMIN_EMAIL
)
)
self.adapter_id = None
self.os_id = None
self.flavor_id = None
self.cluster_id = None
# get adapter information
list_adapters = adapter.list_adapters(user=self.user_object)
for list_adapter in list_adapters:
for supported_os in list_adapter['supported_oses']:
self.os_id = supported_os['os_id']
break
if list_adapter['flavors']:
details = list_adapter['flavors']
for detail in details:
if detail['display_name'] == 'allinone':
roles = detail['roles']
for role in roles:
self.adapter_id = role['adapter_id']
self.flavor_id = role['flavor_id']
break
# add cluster
cluster_names = ['test_cluster1', 'test_cluster2']
for cluster_name in cluster_names:
cluster.add_cluster(
user=self.user_object,
adapter_id=self.adapter_id,
os_id=self.os_id,
flavor_id=self.flavor_id,
name=cluster_name
)
clusters = cluster.list_clusters(user=self.user_object)
self.roles = None
for list_cluster in clusters:
for item in list_cluster['flavor']['roles']:
self.roles = item
if list_cluster['name'] == 'test_cluster1':
self.cluster_id = list_cluster['id']
break
self.package_configs = {
'security': {
'service_credentials': {
'$service': {
'username': 'root',
'password': '<PASSWORD>'
}
},
'console_credentials': {
'$console': {
'username': 'root',
'password': '<PASSWORD>'
}
}
},
'network_mapping': {
'$interface_type': {
'interface': 'eth0',
'subnet': '10.145.88.0/23'
}
}
}
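        # The '$service', '$console' and '$interface_type' keys above are
        # placeholders; the metadata layer is presumably responsible for expanding
        # them into concrete service, console and interface-type names.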
self.os_configs = {
'general': {
'language': 'EN',
'timezone': 'UTC',
'http_proxy': 'http://127.0.0.1:3128',
'https_proxy': 'http://127.0.0.1:3128',
'no_proxy': [
'127.0.0.1',
'compass'
],
'ntp_server': '127.0.0.1',
'dns_servers': [
'127.0.0.1'
],
'domain': 'ods.com',
'search_path': [
'ods.com'
],
'default_gateway': '127.0.0.1',
},
'server_credentials': {
'username': 'root',
'password': '<PASSWORD>',
},
'partition': {
'/var': {
'max_size': '100G',
'percentage': 10,
'size': '1G'
}
}
}
# add cluster config
cluster.update_cluster_config(
self.cluster_id,
user=self.user_object,
os_config=self.os_configs,
package_config=self.package_configs
)
# add switch
switch.add_switch(
user=self.user_object,
ip='172.29.8.40'
)
switches = switch.list_switches(user=self.user_object)
self.switch_id = None
for item in switches:
self.switch_id = item['id']
macs = ['28:6e:d4:46:c4:25', '00:0c:29:bf:eb:1d']
for mac in macs:
switch.add_switch_machine(
self.switch_id,
user=self.user_object,
mac=mac,
port='1'
)
# get machine information
machines = machine.list_machines(user=self.user_object)
self.machine_ids = []
for item in machines:
self.machine_ids.append(item['id'])
# add cluster host
name = ['newname1', 'newname2']
for i in range(0, 2):
cluster.add_cluster_host(
self.cluster_id,
user=self.user_object,
machine_id=self.machine_ids[i],
name=name[i]
)
self.host_id = []
self.clusterhost_id = []
clusterhosts = cluster.list_clusterhosts(user=self.user_object)
for clusterhost in clusterhosts:
self.host_id.append(clusterhost['host_id'])
self.clusterhost_id.append(clusterhost['clusterhost_id'])
# add log file
file_names = ['log_file1', 'log_file2']
for file_name in file_names:
cluster.add_cluster_host_log_history(
self.cluster_id,
self.host_id[0],
user=self.user_object,
filename=file_name
)
# add subnet
subnets = ['10.145.88.0/23', '192.168.100.0/23']
for subnet in subnets:
network.add_subnet(
user=self.user_object,
subnet=subnet
)
list_subnet = network.list_subnets(
user=self.user_object
)
self.subnet_ids = []
for item in list_subnet:
self.subnet_ids.append(item['id'])
# add host network
host.add_host_network(
self.host_id[0],
user=self.user_object,
interface='eth0',
ip='10.145.88.0',
subnet_id=self.subnet_ids[0],
is_mgmt=True
)
host.add_host_network(
self.host_id[0],
user=self.user_object,
interface='eth1',
ip='10.145.88.10',
subnet_id=self.subnet_ids[0],
is_promiscuous=True
)
host.list_host_networks(
self.host_id[0],
user=self.user_object,
)
def tearDown(self):
super(ClusterTestCase, self).tearDown()
class TestListClusters(ClusterTestCase):
"""Test list clusters."""
def setUp(self):
super(TestListClusters, self).setUp()
def tearDown(self):
super(TestListClusters, self).tearDown()
def test_list_clusters(self):
clusters = cluster.list_clusters(user=self.user_object)
result = []
for list_cluster in clusters:
result.append(list_cluster['name'])
expects = ['test_cluster1', 'test_cluster2']
self.assertIsNotNone(clusters)
for expect in expects:
self.assertIn(expect, result)
class TestGetCluster(ClusterTestCase):
"""Test get cluster."""
def setUp(self):
super(TestGetCluster, self).setUp()
def tearDown(self):
super(TestGetCluster, self).tearDown()
def test_get_cluster(self):
get_cluster = cluster.get_cluster(
self.cluster_id,
user=self.user_object,
)
self.assertIsNotNone(get_cluster)
self.assertEqual(get_cluster['name'], 'test_cluster1')
    def test_non_exist_cluster_id(self):
self.assertRaises(
exception.RecordNotExists,
cluster.get_cluster,
99,
user=self.user_object,
)
class TestAddCluster(ClusterTestCase):
"""Test add cluster."""
def setUp(self):
super(TestAddCluster, self).setUp()
def tearDown(self):
super(TestAddCluster, self).tearDown()
def test_add_cluster(self):
cluster.add_cluster(
user=self.user_object,
adapter_id=self.adapter_id,
os_id=self.os_id,
flavor_id=self.flavor_id,
name='test_add_cluster'
)
add_clusters = cluster.list_clusters(user=self.user_object)
result = []
for add_cluster in add_clusters:
result.append(add_cluster['name'])
self.assertIn('test_add_cluster', result)
def test_add_cluster_position_args(self):
cluster.add_cluster(
True,
'test_add_cluster_position',
user=self.user_object,
adapter_id=self.adapter_id,
os_id=self.os_id,
flavor_id=self.flavor_id,
)
add_clusters = cluster.list_clusters(user=self.user_object)
result = []
for add_cluster in add_clusters:
result.append(add_cluster['name'])
self.assertIn('test_add_cluster_position', result)
def test_add_cluster_session(self):
with database.session() as session:
cluster.add_cluster(
user=self.user_object,
adapter_id=self.adapter_id,
os_id=self.os_id,
flavor_id=self.flavor_id,
name='test_add_cluster_session',
session=session
)
add_clusters = cluster.list_clusters(user=self.user_object)
result = []
for add_cluster in add_clusters:
result.append(add_cluster['name'])
self.assertIn('test_add_cluster_session', result)
class TestUpdateCluster(ClusterTestCase):
"""Test update cluster."""
def setUp(self):
super(TestUpdateCluster, self).setUp()
def tearDown(self):
super(TestUpdateCluster, self).tearDown()
def test_update_cluster(self):
cluster.update_cluster(
self.cluster_id,
user=self.user_object,
name='test_update_cluster'
)
update_cluster = cluster.get_cluster(
self.cluster_id,
user=self.user_object,
)
self.assertEqual(update_cluster['name'], 'test_update_cluster')
def test_duplicate_name(self):
cluster.update_cluster(
self.cluster_id,
user=self.user_object,
name='test_update_cluster'
)
self.assertRaises(
exception.DuplicatedRecord,
cluster.update_cluster,
2,
user=self.user_object,
name='test_update_cluster'
)
def test_is_cluster_editable(self):
# cluster should be editable for expansion purposes.
raised = False
cluster.update_cluster_state(
self.cluster_id,
user=self.user_object,
state='INSTALLING'
)
self.assertFalse(raised, exception.Forbidden)
class TestDelCluster(ClusterTestCase):
"""Test delete cluster."""
def setUp(self):
super(TestDelCluster, self).setUp()
def tearDown(self):
        super(TestDelCluster, self).tearDown()
def test_del_cluster(self):
from compass.tasks import client as celery_client
celery_client.celery.send_task = mock.Mock()
cluster.del_cluster(
self.cluster_id,
user=self.user_object,
)
del_clusters = cluster.list_clusters(
user=self.user_object,
)
for del_cluster in del_clusters:
self.assertNotEqual(1, del_cluster['id'])
def test_is_cluster_editable(self):
# cluster should be editable for expansion purposes.
raised = False
cluster.update_cluster_state(
self.cluster_id,
user=self.user_object,
state='INSTALLING'
)
self.assertFalse(raised, exception.Forbidden)
class TestGetClusterConfig(ClusterTestCase):
"""Test get cluster config."""
def setUp(self):
super(TestGetClusterConfig, self).setUp()
cluster.update_cluster_config(
self.cluster_id,
user=self.user_object,
os_config=self.os_configs,
package_config=self.package_configs
)
def tearDown(self):
super(TestGetClusterConfig, self).tearDown()
def test_get_cluster_config(self):
cluster_config = cluster.get_cluster_config(
self.cluster_id,
user=self.user_object,
)
package_config = cluster_config['package_config']
os_config = cluster_config['os_config']
self.assertItemsEqual(package_config, self.package_configs)
self.assertItemsEqual(os_config, self.os_configs)
class TestGetClusterDeployedConfig(ClusterTestCase):
def setUp(self):
super(TestGetClusterDeployedConfig, self).setUp()
cluster.update_cluster_config(
self.cluster_id,
user=self.user_object,
os_config=self.os_configs,
package_config=self.package_configs
)
cluster.update_cluster_host(
self.cluster_id,
self.host_id[0],
user=self.user_object,
roles=['allinone-compute']
)
cluster.review_cluster(
self.cluster_id,
user=self.user_object,
review={
'hosts': [self.host_id[0]]
}
)
cluster.update_cluster_deployed_config(
self.cluster_id,
user=self.user_object,
os_config=self.os_configs,
package_config=self.package_configs
)
def tearDown(self):
super(TestGetClusterDeployedConfig, self).tearDown()
def test_get_cluster_deployed_config(self):
configs = cluster.get_cluster_deployed_config(
self.cluster_id,
user=self.user_object,
)
os_config = configs['deployed_os_config']
package_config = configs['deployed_package_config']
self.assertItemsEqual(os_config, self.os_configs)
self.assertItemsEqual(package_config, self.package_configs)
class TestGetClusterMetadata(ClusterTestCase):
"""Test get cluster metadata."""
def setUp(self):
super(TestGetClusterMetadata, self).setUp()
def tearDown(self):
super(TestGetClusterMetadata, self).tearDown()
def test_get_cluster_metadata(self):
cluster_metadata = cluster.get_cluster_metadata(
self.cluster_id,
user=self.user_object,
)
results = []
for k, v in cluster_metadata.items():
results.append(k)
expected = ['os_config', 'package_config']
self.assertIsNotNone(cluster_metadata)
for result in results:
self.assertIn(result, expected)
class TestUpdateClusterConfig(ClusterTestCase):
"""Test update cluster config."""
def setUp(self):
super(TestUpdateClusterConfig, self).setUp()
def tearDown(self):
super(TestUpdateClusterConfig, self).tearDown()
def test_update_cluster_config(self):
cluster.update_cluster_config(
self.cluster_id,
user=self.user_object,
put_os_config=self.os_configs,
put_package_config=self.package_configs
)
update_cluster_config = cluster.get_cluster_config(
self.cluster_id,
user=self.user_object,
)
package_config = update_cluster_config['package_config']
os_config = update_cluster_config['os_config']
self.assertItemsEqual(package_config, self.package_configs)
self.assertItemsEqual(os_config, self.os_configs)
class TestPatchClusterConfig(ClusterTestCase):
"""Test patch cluster config."""
def setUp(self):
super(TestPatchClusterConfig, self).setUp()
def tearDown(self):
super(TestPatchClusterConfig, self).tearDown()
def test_patch_cluster_config(self):
patch_cluster_config = cluster.patch_cluster_config(
self.cluster_id,
user=self.user_object,
package_config=self.package_configs,
os_config=self.os_configs
)
package_config = patch_cluster_config['package_config']
os_config = patch_cluster_config['os_config']
self.assertItemsEqual(package_config, self.package_configs)
self.assertItemsEqual(os_config, self.os_configs)
class TestDelClusterConfig(ClusterTestCase):
"""Test delete a cluster config."""
def setUp(self):
super(TestDelClusterConfig, self).setUp()
cluster.update_cluster_config(
self.cluster_id,
user=self.user_object,
os_config=self.os_configs,
package_config=self.package_configs
)
def tearDown(self):
super(TestDelClusterConfig, self).tearDown()
def test_del_cluster_config(self):
cluster.del_cluster_config(
self.cluster_id,
user=self.user_object,
)
del_cluster_config = cluster.get_cluster_config(
self.cluster_id,
user=self.user_object,
)
configs = []
for k, v in del_cluster_config.items():
if k == 'package_config' or k == 'os_config':
configs.append(v)
for config in configs:
self.assertEqual(config, {})
def test_cluster_editable(self):
# cluster should be editable for expansion purposes.
raised = False
cluster.update_cluster_state(
self.cluster_id,
user=self.user_object,
state='INSTALLING'
)
self.assertFalse(raised, exception.Forbidden)
class TestListClusterHosts(ClusterTestCase):
"""Test list cluster hosts."""
def setUp(self):
super(TestListClusterHosts, self).setUp()
def tearDown(self):
super(TestListClusterHosts, self).tearDown()
def test_list_cluster_hosts(self):
list_cluster_hosts = cluster.list_cluster_hosts(
self.cluster_id,
user=self.user_object,
)
results = []
expected = ['28:6e:d4:46:c4:25', '00:0c:29:bf:eb:1d']
for item in list_cluster_hosts:
results.append(item['mac'])
for result in results:
self.assertIn(result, expected)
class TestListClusterhosts(ClusterTestCase):
"""Test list clusterhosts."""
def setUp(self):
super(TestListClusterhosts, self).setUp()
def tearDown(self):
super(TestListClusterhosts, self).tearDown()
def test_list_clusterhosts(self):
list_clusterhosts = cluster.list_clusterhosts(user=self.user_object)
results = []
expected = ['28:6e:d4:46:c4:25', '00:0c:29:bf:eb:1d']
for item in list_clusterhosts:
results.append(item['mac'])
for result in results:
self.assertIn(result, expected)
class TestGetClusterHost(ClusterTestCase):
"""Test get cluster host."""
def setUp(self):
super(TestGetClusterHost, self).setUp()
def tearDown(self):
super(TestGetClusterHost, self).tearDown()
def test_get_cluster_host(self):
get_cluster_host = cluster.get_cluster_host(
self.cluster_id,
self.host_id[1],
user=self.user_object,
)
self.assertEqual(get_cluster_host['mac'], '00:0c:29:bf:eb:1d')
class TestGetClusterhost(ClusterTestCase):
"""Test get clusterhost."""
def setUp(self):
super(TestGetClusterhost, self).setUp()
def tearDown(self):
super(TestGetClusterhost, self).tearDown()
def test_get_clusterhost(self):
get_clusterhost = cluster.get_clusterhost(
self.clusterhost_id[1],
user=self.user_object,
)
self.assertEqual(get_clusterhost['mac'], '00:0c:29:bf:eb:1d')
class TestAddClusterHost(ClusterTestCase):
"""Test add cluster host."""
def setUp(self):
super(TestAddClusterHost, self).setUp()
switch.add_switch_machine(
self.switch_id,
user=self.user_object,
mac='00:0c:29:5b:ee:eb',
port='1'
)
machines = machine.list_machines(user=self.user_object)
self.add_machine_id = None
for item in machines:
if item['mac'] == '00:0c:29:5b:ee:eb':
self.add_machine_id = item['id']
def tearDown(self):
super(TestAddClusterHost, self).tearDown()
def test_add_cluster_host(self):
# add a cluster_host
cluster.add_cluster_host(
self.cluster_id,
user=self.user_object,
machine_id=self.add_machine_id,
name='test_add_cluster_host'
)
add_cluster_hosts = cluster.list_clusterhosts(user=self.user_object)
expected = {
'hostname': 'test_add_cluster_host',
'owner': '<EMAIL>',
'name': 'test_add_cluster_host.test_cluster1',
}
self.assertTrue(
all(item in add_cluster_hosts[2].items()
for item in expected.items()))
2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (<EMAIL>; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (<EMAIL>; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (<EMAIL>; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
    'Mozilla/5.0 (Windows;
r"""
Series constructor for modular forms for Hecke triangle groups
AUTHORS:
- Based on the thesis of <NAME> (2008)
- <NAME> (2013): initial version
.. NOTE::
``J_inv_ZZ`` is the main function used to determine all Fourier expansions.
"""
#*****************************************************************************
# Copyright (C) 2013-2014 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.all import ZZ, QQ, infinity, PolynomialRing, LaurentSeries, PowerSeriesRing, FractionField
from sage.rings.big_oh import O
from sage.functions.all import exp
from sage.arith.all import bernoulli, sigma, rising_factorial
from sage.structure.sage_object import SageObject
from sage.structure.unique_representation import UniqueRepresentation
from sage.misc.cachefunc import cached_method
from hecke_triangle_groups import HeckeTriangleGroup
class MFSeriesConstructor(SageObject,UniqueRepresentation):
r"""
Constructor for the Fourier expansion of some
(specific, basic) modular forms.
The constructor is used by forms elements in case
their Fourier expansion is needed or requested.
"""
@staticmethod
def __classcall__(cls, group = HeckeTriangleGroup(3), prec=ZZ(10)):
r"""
Return a (cached) instance with canonical parameters.
        .. NOTE::
For each choice of group and precision the constructor is
cached (only) once. Further calculations with different
base rings and possibly numerical parameters are based on
the same cached instance.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor() == MFSeriesConstructor(3, 10)
True
sage: MFSeriesConstructor(group=4).hecke_n()
4
sage: MFSeriesConstructor(group=5, prec=12).prec()
12
"""
if (group==infinity):
group = HeckeTriangleGroup(infinity)
else:
try:
group = HeckeTriangleGroup(ZZ(group))
except TypeError:
group = HeckeTriangleGroup(group.n())
prec=ZZ(prec)
        # We don't need this assumption; the precision may in principle also be negative.
# if (prec<1):
# raise Exception("prec must be an Integer >=1")
return super(MFSeriesConstructor,cls).__classcall__(cls, group, prec)
def __init__(self, group, prec):
r"""
Constructor for the Fourier expansion of some
(specific, basic) modular forms.
INPUT:
- ``group`` -- A Hecke triangle group (default: HeckeTriangleGroup(3)).
- ``prec`` -- An integer (default: 10), the default precision used
in calculations in the LaurentSeriesRing or PowerSeriesRing.
OUTPUT:
The constructor for Fourier expansion with the specified settings.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFC = MFSeriesConstructor()
sage: MFC
Power series constructor for Hecke modular forms for n=3 with (basic series) precision 10
sage: MFC.group()
Hecke triangle group for n = 3
sage: MFC.prec()
10
sage: MFC._series_ring
Power Series Ring in q over Rational Field
sage: MFSeriesConstructor(group=4)
Power series constructor for Hecke modular forms for n=4 with (basic series) precision 10
sage: MFSeriesConstructor(group=5, prec=12)
Power series constructor for Hecke modular forms for n=5 with (basic series) precision 12
sage: MFSeriesConstructor(group=infinity)
Power series constructor for Hecke modular forms for n=+Infinity with (basic series) precision 10
"""
self._group = group
self._prec = prec
self._series_ring = PowerSeriesRing(QQ,'q',default_prec=self._prec)
def _repr_(self):
r"""
Return the string representation of ``self``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(group=4)
Power series constructor for Hecke modular forms for n=4 with (basic series) precision 10
sage: MFSeriesConstructor(group=5, prec=12)
Power series constructor for Hecke modular forms for n=5 with (basic series) precision 12
"""
return "Power series constructor for Hecke modular forms for n={} with (basic series) precision {}".\
format(self._group.n(), self._prec)
def group(self):
r"""
Return the (Hecke triangle) group of ``self``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(group=4).group()
Hecke triangle group for n = 4
"""
return self._group
def hecke_n(self):
r"""
Return the parameter ``n`` of the (Hecke triangle) group of ``self``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(group=4).hecke_n()
4
"""
return self._group.n()
def prec(self):
r"""
Return the used default precision for the PowerSeriesRing or LaurentSeriesRing.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(group=5).prec()
10
sage: MFSeriesConstructor(group=5, prec=20).prec()
20
"""
return self._prec
@cached_method
def J_inv_ZZ(self):
r"""
Return the rational Fourier expansion of ``J_inv``,
where the parameter ``d`` is replaced by ``1``.
This is the main function used to determine all Fourier expansions!
        .. NOTE::
The Fourier expansion of ``J_inv`` for ``d!=1``
is given by ``J_inv_ZZ(q/d)``.
        .. TODO::
The functions that are used in this implementation are
products of hypergeometric series with other, elementary,
functions. Implement them and clean up this representation.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(prec=3).J_inv_ZZ()
q^-1 + 31/72 + 1823/27648*q + O(q^2)
sage: MFSeriesConstructor(group=5, prec=3).J_inv_ZZ()
q^-1 + 79/200 + 42877/640000*q + O(q^2)
sage: MFSeriesConstructor(group=5, prec=3).J_inv_ZZ().parent()
Laurent Series Ring in q over Rational Field
sage: MFSeriesConstructor(group=infinity, prec=3).J_inv_ZZ()
q^-1 + 3/8 + 69/1024*q + O(q^2)
"""
F1 = lambda a,b: self._series_ring(
[ ZZ(0) ]
+ [
rising_factorial(a,k) * rising_factorial(b,k) / (ZZ(k).factorial())**2
* sum(ZZ(1)/(a+j) + ZZ(1)/(b+j) - ZZ(2)/ZZ(1+j)
for j in range(ZZ(0),ZZ(k))
)
for k in range(ZZ(1), ZZ(self._prec+1))
],
ZZ(self._prec+1)
)
F = lambda a,b,c: self._series_ring(
[
rising_factorial(a,k) * rising_factorial(b,k) / rising_factorial(c,k) / ZZ(k).factorial()
for k in range(ZZ(0), ZZ(self._prec+1))
],
ZZ(self._prec+1)
)
a = self._group.alpha()
b = self._group.beta()
Phi = F1(a,b) / F(a,b,ZZ(1))
q = self._series_ring.gen()
# the current implementation of power series reversion is slow
# J_inv_ZZ = ZZ(1) / ((q*Phi.exp()).reverse())
temp_f = (q*Phi.exp()).polynomial()
new_f = temp_f.revert_series(temp_f.degree()+1)
J_inv_ZZ = ZZ(1) / (new_f + O(q**(temp_f.degree()+1)))
return J_inv_ZZ
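    # Implementation note: ``F(a, b, c)`` above is the truncated Gauss
    # hypergeometric series 2F1(a, b; c; x), while ``F1(a, b)`` multiplies each
    # coefficient of ``F(a, b, 1)`` by the partial sum
    # sum_{j=0}^{k-1} (1/(a+j) + 1/(b+j) - 2/(1+j)).  ``J_inv_ZZ`` is then
    # obtained by exponentiating ``Phi``, multiplying by ``q``, reverting that
    # series and taking the reciprocal, as in the commented-out one-liner above.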
@cached_method
def f_rho_ZZ(self):
r"""
Return the rational Fourier expansion of ``f_rho``,
where the parameter ``d`` is replaced by ``1``.
        .. NOTE::
The Fourier expansion of ``f_rho`` for ``d!=1``
is given by ``f_rho_ZZ(q/d)``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(prec=3).f_rho_ZZ()
1 + 5/36*q + 5/6912*q^2 + O(q^3)
sage: MFSeriesConstructor(group=5, prec=3).f_rho_ZZ()
1 + 7/100*q + 21/160000*q^2 + O(q^3)
sage: MFSeriesConstructor(group=5, prec=3).f_rho_ZZ().parent()
Power Series Ring in q over Rational Field
sage: MFSeriesConstructor(group=infinity, prec=3).f_rho_ZZ()
1
"""
q = self._series_ring.gen()
n = self.hecke_n()
if (n == infinity):
f_rho_ZZ = self._series_ring(1)
else:
temp_expr = ((-q*self.J_inv_ZZ().derivative())**2/(self.J_inv_ZZ()*(self.J_inv_ZZ()-1))).power_series()
f_rho_ZZ = (temp_expr.log()/(n-2)).exp()
return f_rho_ZZ
@cached_method
def f_i_ZZ(self):
r"""
Return the rational Fourier expansion of ``f_i``,
where the parameter ``d`` is replaced by ``1``.
        .. NOTE::
The Fourier expansion of ``f_i`` for ``d!=1``
is given by ``f_i_ZZ(q/d)``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(prec=3).f_i_ZZ()
1 - 7/24*q - 77/13824*q^2 + O(q^3)
sage: MFSeriesConstructor(group=5, prec=3).f_i_ZZ()
1 - 13/40*q - 351/64000*q^2 + O(q^3)
sage: MFSeriesConstructor(group=5, prec=3).f_i_ZZ().parent()
Power Series Ring in q over Rational Field
sage: MFSeriesConstructor(group=infinity, prec=3).f_i_ZZ()
1 - 3/8*q + 3/512*q^2 + O(q^3)
"""
q = self._series_ring.gen()
n = self.hecke_n()
if (n == infinity):
f_i_ZZ = (-q*self.J_inv_ZZ().derivative()/self.J_inv_ZZ()).power_series()
else:
temp_expr = ((-q*self.J_inv_ZZ().derivative())**n/(self.J_inv_ZZ()**(n-1)*(self.J_inv_ZZ()-1))).power_series()
f_i_ZZ = (temp_expr.log()/(n-2)).exp()
return f_i_ZZ
@cached_method
def f_inf_ZZ(self):
r"""
Return the rational Fourier expansion of ``f_inf``,
where the parameter ``d`` is replaced by ``1``.
        .. NOTE::
The Fourier expansion of ``f_inf`` for ``d!=1``
is given by ``d*f_inf_ZZ(q/d)``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(prec=3).f_inf_ZZ()
q - 1/72*q^2 + 7/82944*q^3 + O(q^4)
sage: MFSeriesConstructor(group=5, prec=3).f_inf_ZZ()
q - 9/200*q^2 + 279/640000*q^3 + O(q^4)
sage: MFSeriesConstructor(group=5, prec=3).f_inf_ZZ().parent()
Power Series Ring in q over Rational Field
sage: MFSeriesConstructor(group=infinity, prec=3).f_inf_ZZ()
q - 1/8*q^2 + 7/1024*q^3 + O(q^4)
"""
q = self._series_ring.gen()
n = self.hecke_n()
if (n == infinity):
f_inf_ZZ = ((-q*self.J_inv_ZZ().derivative())**2/(self.J_inv_ZZ()**2*(self.J_inv_ZZ()-1))).power_series()
else:
temp_expr = ((-q*self.J_inv_ZZ().derivative())**(2*n)/(self.J_inv_ZZ()**(2*n-2)*(self.J_inv_ZZ()-1)**n)/q**(n-2)).power_series()
f_inf_ZZ = (temp_expr.log()/(n-2)).exp()*q
return f_inf_ZZ
@cached_method
def G_inv_ZZ(self):
r"""
Return the rational Fourier expansion of ``G_inv``,
where the parameter ``d`` is replaced by ``1``.
        .. NOTE::
The Fourier expansion of ``G_inv`` for ``d!=1``
is given by ``d*G_inv_ZZ(q/d)``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(group=4, prec=3).G_inv_ZZ()
q^-1 - 3/32 - 955/16384*q + O(q^2)
sage: MFSeriesConstructor(group=8, prec=3).G_inv_ZZ()
q^-1 - 15/128 - 15139/262144*q + O(q^2)
sage: MFSeriesConstructor(group=8, prec=3).G_inv_ZZ().parent()
Laurent Series Ring in q over Rational Field
sage: MFSeriesConstructor(group=infinity, prec=3).G_inv_ZZ()
q^-1 - 1/8 - 59/1024*q + O(q^2)
"""
n = self.hecke_n()
# Note that G_inv is not a weakly holomorphic form (because of the behavior at -1)
if (n == infinity):
q = self._series_ring.gen()
temp_expr = (self.J_inv_ZZ()/self.f_inf_ZZ()*q**2).power_series()
return 1/q*self.f_i_ZZ()*(temp_expr.log()/2).exp()
elif (ZZ(2).divides(n)):
return self.f_i_ZZ()*(self.f_rho_ZZ()**(ZZ(n/ZZ(2))))/self.f_inf_ZZ()
else:
#return self._qseries_ring([])
raise ValueError("G_inv doesn't exist for n={}.".format(self.hecke_n()))
@cached_method
def E4_ZZ(self):
r"""
Return the rational Fourier expansion of ``E_4``,
where the parameter ``d`` is replaced by ``1``.
        .. NOTE::
The Fourier expansion of ``E4`` for ``d!=1``
is given by ``E4_ZZ(q/d)``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.series_constructor import MFSeriesConstructor
sage: MFSeriesConstructor(prec=3).E4_ZZ()
1 + 5/36*q + 5/6912*q^2 + O(q^3)
sage: MFSeriesConstructor(group=5, prec=3).E4_ZZ()
1 + 21/100*q + 483/32000*q^2 + O(q^3)
sage: MFSeriesConstructor(group=5, prec=3).E4_ZZ().parent()
Power Series Ring in q over Rational Field
sage: MFSeriesConstructor(group=infinity, prec=3).E4_ZZ()
1 + 1/4*q + 7/256*q^2 + O(q^3)
"""
q = self._series_ring.gen()
E4_ZZ = ((-q*self.J_inv_ZZ().derivative())**2/(self.J_inv_ZZ()*(self.J_inv_ZZ()-1))).power_series()
return E4_ZZ
@cached_method
def E6_ZZ(self):
| |
# Repository: ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
# File: mmdet/models/anchor_heads/iou_aware_retina_head.py
import numpy as np
import torch.nn as nn
import torch
from mmcv.cnn import normal_init
from .anchor_head import AnchorHead
from ..utils import bias_init_with_prob, ConvModule
# added by WSK
from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox, bbox_overlaps,
multi_apply, multiclass_nms, weighted_cross_entropy,
weighted_smoothl1, weighted_binary_cross_entropy,
weighted_sigmoid_focal_loss)
from mmdet.core.loss import weighted_iou_regression_loss
from ..registry import HEADS
from ..builder import build_loss
from mmdet.core.anchor.anchor_target import expand_binary_labels
from mmdet.ops import DeformConv, MaskedConv2d
class FeatureAlignment(nn.Module):
"""Feature Adaption Module.
Feature Adaption Module is implemented based on DCN v1.
It uses anchor shape prediction rather than feature map to
predict offsets of deformable conv layer.
Args:
in_channels (int): Number of channels in the input feature map.
out_channels (int): Number of channels in the output feature map.
kernel_size (int): Deformable conv kernel size.
deformable_groups (int): Deformable conv group size.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
deformable_groups=4):
super(FeatureAlignment, self).__init__()
offset_channels = kernel_size * kernel_size * 2
self.conv_offset = nn.Conv2d(
4, deformable_groups * offset_channels, 1, bias=False)
self.conv_adaption = DeformConv(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
deformable_groups=deformable_groups)
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
normal_init(self.conv_offset, std=0.1)
normal_init(self.conv_adaption, std=0.01)
def forward(self, x, shape):
offset = self.conv_offset(shape.detach())
x = self.relu(self.conv_adaption(x, offset))
return x
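# Illustrative usage sketch for FeatureAlignment (not part of the original file;
# the channel count of 256 and the batch/spatial sizes are assumptions, only the
# 4-channel "shape" input is fixed by ``conv_offset`` above):
#
#     align = FeatureAlignment(in_channels=256, out_channels=256,
#                              kernel_size=3, deformable_groups=4)
#     align.init_weights()
#     # x:     (batch, 256, H, W) feature map from one FPN level
#     # shape: (batch, 4, H, W)   predicted box deltas for a single anchor
#     aligned = align(x, shape)   # -> (batch, 256, H, W)
#
# ``IoUawareRetinaHead.forward_single`` below applies it in exactly this way,
# once per anchor, when ``use_feature_alignment`` is enabled.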
@HEADS.register_module
class IoUawareRetinaHead(AnchorHead):
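    """RetinaHead variant with an additional per-anchor IoU prediction branch.

    Besides the usual classification and regression outputs, ``forward_single``
    returns an IoU prediction map of shape (batch, A, width_i, height_i).
    ``loss_single`` supervises it with ``weighted_iou_regression_loss`` against
    the IoU between the decoded predicted boxes and their regression targets.
    """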
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
octave_base_scale=4,
scales_per_octave=3,
conv_cfg=None,
norm_cfg=None,
loss_iou=dict(type='GHMIoU', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0),
**kwargs):
self.stacked_convs = stacked_convs
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
anchor_scales = octave_scales * octave_base_scale
super(IoUawareRetinaHead, self).__init__(
num_classes, in_channels, anchor_scales=anchor_scales, **kwargs)
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(self.feat_channels, self.num_anchors*4, 3, padding=1)
# added by WSK
        # Analyze the effect of the conv layers shared between the regression
        # head and the IoU prediction head. The total number of conv layers used
        # to extract features for IoU prediction has to be kept at 4.
self.shared_conv = 4
if self.shared_conv < 4:
self.iou_convs = nn.ModuleList()
for i in range(4-self.shared_conv):
chn = self.in_channels if (self.shared_conv==0 and i == 0) else self.feat_channels
self.iou_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
)
# feature alignment for IoU prediction
self.use_feature_alignment = False
self.deformable_groups = 4
if self.use_feature_alignment:
self.feature_alignment = FeatureAlignment(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deformable_groups = self.deformable_groups)
self.retina_iou = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
else:
self.retina_iou = nn.Conv2d(self.feat_channels, self.num_anchors, 3, padding=1)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
# added by WSK
if self.shared_conv < 4:
for m in self.iou_convs:
normal_init(m.conv, std=0.01)
normal_init(self.retina_iou, std=0.01)
# added by WSK
if self.use_feature_alignment:
self.feature_alignment.init_weights()
def forward_single(self, x):
"""
process one level of FPN
:param x: one feature level of FPN. tensor of size (batch, self.feat_channels, width_i, height_i)
:return:
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
reg_feat_list = []
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
reg_feat_list.append(reg_feat)
cls_score = self.retina_cls(cls_feat) # (batch, A*num_class, width_i, height_i)
bbox_pred = self.retina_reg(reg_feat) # (batch, A*4, width_i, height_i)
#added by WSK
# concatenation of regression prediction and feature map for the input of
# IoU prediction head
# bbox_pred_clone = bbox_pred.clone()
# bbox_pred_clone = bbox_pred_clone.detach()
# reg_feat = torch.cat([reg_feat_list[-1],bbox_pred_clone], 1)
# analyze the effect of the shared conv layers between regression head and
# IoU prediction head.
if self.shared_conv == 0:
iou_feat = x
else:
iou_feat = reg_feat_list[self.shared_conv - 1]
if self.shared_conv < 4:
for iou_conv in self.iou_convs:
iou_feat = iou_conv(iou_feat)
# iou_pred = self.retina_iou(iou_feat) # (batch, A, width_i, height_i)
# feature alignment for iou prediction
if self.use_feature_alignment:
bbox_pred_list = torch.split(bbox_pred, 4, dim=1)
iou_pred_list = []
for i in range(len(bbox_pred_list)):
iou_feat_aligned = self.feature_alignment(iou_feat, bbox_pred_list[i])
iou_pred_single_anchor = self.retina_iou(iou_feat_aligned) # (batch, 1, width_i, height_i)
iou_pred_list.append(iou_pred_single_anchor)
iou_pred = torch.cat(iou_pred_list, 1) # (batch, A, width_i, height_i)
else:
iou_pred = self.retina_iou(iou_feat) # (batch, A, width_i, height_i)
return cls_score, bbox_pred, iou_pred
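    # Note: the ``forward`` inherited from ``AnchorHead`` is expected to map
    # ``forward_single`` over all FPN levels (via ``multi_apply``), so ``loss``
    # and ``get_bboxes`` receive per-level lists of (cls_score, bbox_pred,
    # iou_pred).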
def loss_single(self, cls_score, bbox_pred, iou_pred,
labels, label_weights, bbox_targets, bbox_weights,
level_anchor,
num_total_samples,
gt_bboxes, # added by <NAME>
cfg):
"""
compute loss for a single layer of the prediction pyramid.
:param cls_score: tensor of shape (batch, A*num_class, width_i, height_i)
:param bbox_pred: tensor of shape (batch, A*4, width_i, height_i)
        :param iou_pred: tensor of shape (batch, A, width_i, height_i); the sigmoid layer has not been applied.
:param labels: For RetinaNet, tensor of shape (batch, A*width*height) storing gt labels such as 1, 2, 80 for
positive examples and 0 for negatives or others.
:param label_weights: the same as labels. 1 for positive and negative examples, 0 for invalid anchors and neutrals.
:param bbox_targets: tensor of shape (batch, A*width*height, 4). Store the parametrized coordinates of
targets for positives and 0 for negatives and others.
:param bbox_weights: tensor of shape (batch, A*width*height, 4). 1 for positives and 0 for negatives and others.
:param level_anchor: tensor of shape (batch, A*width*height, 4)
:param num_total_samples:
        :param gt_bboxes: list of tensors. gt_bboxes[i].size() = (num_truth_i, 4); stores the top-left and bottom-right corners of
         the ground-truth boxes for the i-th image.
:param cfg:
:return:
"""
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
# added by <NAME>
level_anchor = level_anchor.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) # (batch*A*width_i*height_i, 4)
IoU_balanced_Cls = self.IoU_balanced_Cls
IoU_balanced_Loc = self.IoU_balanced_Loc
pred_box = delta2bbox(level_anchor, bbox_pred, self.target_means, self.target_stds)
        # the negatives will store the anchor information (x, y, w, h)
target_box = delta2bbox(level_anchor, bbox_targets, self.target_means, self.target_stds)
iou = bbox_overlaps(target_box, pred_box, is_aligned=True) # (batch*width_i*height_i*A)
if IoU_balanced_Loc:
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
iou,
bbox_weights,
avg_factor=num_total_samples)
else:
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
# added by WSK
iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1) # (batch*width_i*height_i*A)
bbox_weight_list = torch.split(bbox_weights, 1, -1)
bbox_weight = bbox_weight_list[0]
bbox_weight = torch.squeeze(bbox_weight) # (batch*A*width_i*height_i)
weight_iou = 1.0
loss_iou = weight_iou*weighted_iou_regression_loss(iou_pred, iou, bbox_weight, avg_factor=num_total_samples)
# classification loss: focal loss for positive and negative examples
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
# # added by <NAME>
if self.use_sigmoid_cls:
# transform tensor 'label' from size (batch*A*width*height) to size (batch*A*width*height, num_class)
# and the same as tensor 'label_weights'. may be wrong for rpn
labels, label_weights = expand_binary_labels(labels, label_weights, self.cls_out_channels)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
if IoU_balanced_Cls:
loss_cls = self.loss_cls(
cls_score,
labels,
label_weights,
iou,
#
avg_factor=num_total_samples)
# print('test')
else:
loss_cls = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox, loss_iou
def loss(self,
cls_scores,
bbox_preds,
iou_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
"""
        :param cls_scores: list[Tensor]. len(cls_scores) equals the number of feature map levels,
         and cls_scores[i].size() is (batch, A*C, width_i, height_i), where width_i and height_i are the size
         of the i-th level feature map.
        :param bbox_preds: list[Tensor]. len(bbox_preds) equals the number of feature map levels,
         and bbox_preds[i].size() is (batch, A*4, width_i, height_i), where width_i and height_i are the size
         of the i-th level feature map.
        :param iou_preds: list[Tensor]. len(iou_preds) equals the number of feature map levels,
         and iou_preds[i].size() is (batch, A, width_i, height_i), where width_i and height_i are the size
         of the i-th level feature map.
        :param gt_bboxes: list[Tensor]. Ground-truth bboxes of each image; stores the top-left and bottom-right corners
         in the image coordinate system.
:param gt_labels:
:param img_metas: list[dict], Meta info of each image.
:param cfg:
:param gt_bboxes_ignore:
:return:
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.anchor_generators)
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = anchor_target(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
self.target_means,
self.target_stds,
cfg,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg,
level_anchor_list) = cls_reg_targets
# added by WSK
# If sampling is adopted, num_total_samples = num_total_pos + num_total_neg;
# otherwise, num_total_samples = num_total_pos. For 'FocalLoss', 'GHMC', 'IOUbalancedSigmoidFocalLoss',
# sampling is not adopted.
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
losses_cls, losses_bbox, losses_iou= multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
iou_preds,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
level_anchor_list, # added by Shengkai Wu
num_total_samples=num_total_samples,
gt_bboxes = gt_bboxes, # added by Shengkai Wu
cfg=cfg)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox, losses_iou=losses_iou)
def get_bboxes(self,
cls_scores,
bbox_preds,
| |
        aaa = pp.Group(pp.Word("a")("A"))
bbb = pp.Group(pp.Word("b")("B"))
ccc = pp.Group(":" + pp.Word("c")("C"))
g1 = "XXX" + (aaa | bbb | ccc)[...]
teststring = "XXX b bb a bbb bbbb aa bbbbb :c bbbbbb aaa"
names = []
print(g1.parseString(teststring).dump())
for t in g1.parseString(teststring):
print(t, repr(t))
try:
names.append(t[0].getName())
except Exception:
try:
names.append(t.getName())
except Exception:
names.append(None)
print(teststring)
print(names)
self.assertEqual(
[None, "B", "B", "A", "B", "B", "A", "B", None, "B", "A"],
names,
"failure in getting names for tokens",
)
from pyparsing import Keyword, Word, alphas, OneOrMore
IF, AND, BUT = map(Keyword, "if and but".split())
ident = ~(IF | AND | BUT) + Word(alphas)("non-key")
scanner = OneOrMore(IF | AND | BUT | ident)
def getNameTester(s, l, t):
print(t, t.getName())
ident.addParseAction(getNameTester)
scanner.parseString("lsjd sldkjf IF Saslkj AND lsdjf")
# test ParseResults.get() method
print("verify behavior of ParseResults.get()")
# use sum() to merge separate groups into single ParseResults
res = sum(g1.parseString(teststring)[1:])
print(res.dump())
print(res.get("A", "A not found"))
print(res.get("D", "!D"))
self.assertEqual(
"aaa", res.get("A", "A not found"), "get on existing key failed"
)
self.assertEqual("!D", res.get("D", "!D"), "get on missing key failed")
def testOptionalBeyondEndOfString(self):
print("verify handling of Optional's beyond the end of string")
testGrammar = "A" + pp.Optional("B") + pp.Optional("C") + pp.Optional("D")
testGrammar.parseString("A")
testGrammar.parseString("AB")
def testCreateLiteralWithEmptyString(self):
# test creating Literal with empty string
print('verify non-fatal usage of Literal("")')
with self.assertWarns(
SyntaxWarning, msg="failed to warn use of empty string for Literal"
):
e = pp.Literal("")
try:
e.parseString("SLJFD")
except Exception as e:
self.fail("Failed to handle empty Literal")
def testLineMethodSpecialCaseAtStart(self):
# test line() behavior when starting at 0 and the opening line is an \n
print("verify correct line() behavior when first line is empty string")
self.assertEqual(
"",
pp.line(0, "\nabc\ndef\n"),
"Error in line() with empty first line in text",
)
txt = "\nabc\ndef\n"
results = [pp.line(i, txt) for i in range(len(txt))]
self.assertEqual(
["", "abc", "abc", "abc", "abc", "def", "def", "def", "def"],
results,
"Error in line() with empty first line in text",
)
txt = "abc\ndef\n"
results = [pp.line(i, txt) for i in range(len(txt))]
self.assertEqual(
["abc", "abc", "abc", "abc", "def", "def", "def", "def"],
results,
"Error in line() with non-empty first line in text",
)
def testRepeatedTokensWhenPackratting(self):
# test bugfix with repeated tokens when packrat parsing enabled
print("verify behavior with repeated tokens when packrat parsing is enabled")
a = pp.Literal("a")
b = pp.Literal("b")
c = pp.Literal("c")
abb = a + b + b
abc = a + b + c
aba = a + b + a
grammar = abb | abc | aba
self.assertEqual(
"aba", "".join(grammar.parseString("aba")), "Packrat ABA failure!"
)
def testSetResultsNameWithOneOrMoreAndZeroOrMore(self):
print("verify behavior of setResultsName with OneOrMore and ZeroOrMore")
stmt = pp.Keyword("test")
print(stmt[...]("tests").parseString("test test").tests)
print(stmt[1, ...]("tests").parseString("test test").tests)
print(pp.Optional(stmt[1, ...]("tests")).parseString("test test").tests)
print(pp.Optional(stmt[1, ...])("tests").parseString("test test").tests)
print(
pp.Optional(pp.delimitedList(stmt))("tests").parseString("test,test").tests
)
self.assertEqual(
2,
len(stmt[...]("tests").parseString("test test").tests),
"ZeroOrMore failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[1, ...]("tests").parseString("test test").tests),
"OneOrMore failure with setResultsName",
)
self.assertEqual(
2,
len(pp.Optional(stmt[1, ...]("tests")).parseString("test test").tests),
"OneOrMore failure with setResultsName",
)
self.assertEqual(
2,
len(
pp.Optional(pp.delimitedList(stmt))("tests")
.parseString("test,test")
.tests
),
"delimitedList failure with setResultsName",
)
self.assertEqual(
2,
len((stmt * 2)("tests").parseString("test test").tests),
"multiplied(1) failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[..., 2]("tests").parseString("test test").tests),
"multiplied(2) failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[1, ...]("tests").parseString("test test").tests),
"multiplied(3) failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[2, ...]("tests").parseString("test test").tests),
"multiplied(3) failure with setResultsName",
)
def testParseResultsReprWithResultsNames(self):
word = pp.Word(pp.printables)("word")
res = word[...].parseString("test blub")
print(repr(res))
print(res["word"])
print(res.asDict())
self.assertEqual(
"(['test', 'blub'], {'word': 'blub'})",
repr(res),
"incorrect repr for ParseResults with listAllMatches=False",
)
word = pp.Word(pp.printables)("word*")
res = word[...].parseString("test blub")
print(repr(res))
print(res["word"])
print(res.asDict())
self.assertEqual(
"(['test', 'blub'], {'word': ['test', 'blub']})",
repr(res),
"incorrect repr for ParseResults with listAllMatches=True",
)
def testWarnUsingLshiftForward(self):
import warnings
print(
"verify that using '<<' operator with a Forward raises a warning if there is a dangling '|' operator"
)
fwd = pp.Forward()
print("unsafe << and |, but diag not enabled, should not warn")
fwd << pp.Word("a") | pp.Word("b")
pp.__diag__.enable("warn_on_match_first_with_lshift_operator")
with self.assertWarns(
SyntaxWarning, msg="failed to warn of using << and | operators"
):
fwd = pp.Forward()
print("unsafe << and |, should warn")
fwd << pp.Word("a") | pp.Word("b")
with self.assertWarns(
SyntaxWarning,
msg="failed to warn of using << and | operators (within lambda)",
):
fwd = pp.Forward()
print("unsafe << and |, should warn")
fwd_fn = lambda expr1, expr2: fwd << expr1 | expr2
fwd_fn(pp.Word("a"), pp.Word("b"))
fwd = pp.Forward()
print("safe <<= and |, should not warn")
fwd <<= pp.Word("a") | pp.Word("b")
c = fwd | pp.Word("c")
print("safe << and (|), should not warn")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
fwd = pp.Forward()
fwd << (pp.Word("a") | pp.Word("b"))
try:
c = fwd | pp.Word("c")
except Exception as e:
self.fail("raised warning when it should not have")
def testParseExpressionsWithRegex(self):
from itertools import product
match_empty_regex = pp.Regex(r"[a-z]*")
match_nonempty_regex = pp.Regex(r"[a-z]+")
parser_classes = pp.ParseExpression.__subclasses__()
test_string = "abc def"
expected = ["abc"]
for expr, cls in product(
(match_nonempty_regex, match_empty_regex), parser_classes
):
print(expr, cls)
parser = cls([expr])
parsed_result = parser.parseString(test_string)
print(parsed_result.dump())
self.assertParseResultsEquals(parsed_result, expected)
for expr, cls in product(
(match_nonempty_regex, match_empty_regex), (pp.MatchFirst, pp.Or)
):
parser = cls([expr, expr])
print(parser)
parsed_result = parser.parseString(test_string)
print(parsed_result.dump())
self.assertParseResultsEquals(parsed_result, expected)
def testAssertParseAndCheckDict(self):
"""test assertParseAndCheckDict in test framework"""
expr = pp.Word(pp.alphas)("item") + pp.Word(pp.nums)("qty")
self.assertParseAndCheckDict(
expr, "balloon 25", {"item": "balloon", "qty": "25"}
)
exprWithInt = pp.Word(pp.alphas)("item") + ppc.integer("qty")
self.assertParseAndCheckDict(
exprWithInt, "rucksack 49", {"item": "rucksack", "qty": 49}
)
def testOnlyOnce(self):
"""test class OnlyOnce and its reset method"""
# use a parse action to compute the sum of the parsed integers,
# and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
pa = pp.OnlyOnce(append_sum)
expr = pp.OneOrMore(pp.Word(pp.nums)).addParseAction(pa)
result = expr.parseString("0 123 321")
print(result.dump())
expected = ["0", "123", "321", 444]
self.assertParseResultsEquals(
result, expected, msg="issue with OnlyOnce first call"
)
with self.assertRaisesParseException(
msg="failed to raise exception calling OnlyOnce more than once"
):
result2 = expr.parseString("1 2 3 4 5")
pa.reset()
result = expr.parseString("100 200 300")
print(result.dump())
expected = ["100", "200", "300", 600]
self.assertParseResultsEquals(
result, expected, msg="issue with OnlyOnce after reset"
)
def testGoToColumn(self):
"""tests for GoToColumn class"""
dateExpr = pp.Regex(r"\d\d(\.\d\d){2}")("date")
numExpr = ppc.number("num")
sample = """\
date Not Important value NotImportant2
11.11.13 | useless . useless,21 useless 2 | 14.21 | asmdakldm
21.12.12 | fmpaosmfpoamsp 4 | 41 | ajfa9si90""".splitlines()
# Column number finds match
patt = dateExpr + pp.GoToColumn(70).ignore("|") + numExpr + pp.restOfLine
infile = iter(sample)
next(infile)
expecteds = [["11.11.13", 14.21], ["21.12.12", 41]]
for line, expected in zip(infile, expecteds):
result = patt.parseString(line)
print(result)
self.assertEqual(
expected, [result.date, result.num], msg="issue with GoToColumn"
)
# Column number does NOT match
patt = dateExpr("date") + pp.GoToColumn(30) + numExpr + pp.restOfLine
infile = iter(sample)
next(infile)
for line in infile:
with self.assertRaisesParseException(
msg="issue with GoToColumn not finding match"
):
result = patt.parseString(line)
def testExceptionExplainVariations(self):
class Modifier:
def modify_upper(self, tokens):
tokens[:] = map(str.upper, tokens)
modder = Modifier()
# force an exception in the attached parse action
# integer has a parse action to convert to an int;
# this parse action should fail with a TypeError, since
# str.upper expects a str argument, not an int
grammar = ppc.integer().addParseAction(modder.modify_upper)
self_testcase_name = "tests.test_unit." + type(self).__name__
try:
grammar.parseString("1000")
except Exception as e:
# extract the exception explanation
explain_str = ParseException.explain_exception(e)
print(explain_str)
explain_str_lines = explain_str.splitlines()
expected = [
self_testcase_name,
"pyparsing.core._WordRegex - integer",
"tests.test_unit.Modifier",
"pyparsing.results.ParseResults",
]
# verify the list of names shown in the explain "stack"
self.assertEqual(
expected,
explain_str_lines[-len(expected) :],
msg="invalid explain str",
)
# check type of raised exception matches explain output
# (actual exception text varies by Python version, and even
# by how the exception is raised, so we can only check the
# type name)
exception_line = explain_str_lines[-(len(expected) + 1)]
self.assertTrue(
exception_line.startswith("TypeError:"),
msg="unexpected exception line ({!r})".format(exception_line),
)
def testMiscellaneousExceptionBits(self):
pp.ParserElement.verbose_stacktrace = True
self_testcase_name = "tests.test_unit." + type(self).__name__
# force a parsing exception - match an integer against "ABC"
try:
pp.Word(pp.nums).parseString("ABC")
except pp.ParseException as pe:
with self.assertRaises(AttributeError):
print(pe.nonexistent_attribute)
expected_str = "Expected W:(0-9), found 'A' (at char 0), (line:1, col:1)"
self.assertEqual(expected_str, str(pe), "invalid ParseException str")
self.assertEqual(expected_str, repr(pe), "invalid ParseException repr")
expected_dir = [
"args",
"col",
"explain",
"explain_exception",
"line",
"lineno",
"markInputline",
"with_traceback",
]
observed_dir = [attr for attr in dir(pe) if not attr.startswith("_")]
print(observed_dir)
self.assertEqual(expected_dir, observed_dir, "invalid dir(ParseException)")
self.assertEqual(
">!<ABC", pe.markInputline(), "invalid default mark input line"
)
self.assertEqual(
<filename>httprunner/loader.py
import collections
import csv
import importlib
import io
import json
import os
import sys
import yaml
from httprunner import built_in, exceptions, logger, parser, utils, validator
from httprunner.compat import OrderedDict
sys.path.insert(0, os.getcwd())
project_mapping = {
"debugtalk": {
"variables": {},
"functions": {}
},
"env": {},
"def-api": {},
"def-testcase": {}
}
""" dict: save project loaded api/testcases definitions, environments and debugtalk.py module.
"""
testcases_cache_mapping = {}
project_working_directory = os.getcwd()
###############################################################################
## file loader
###############################################################################
def _check_format(file_path, content):
""" check testcase format if valid
"""
# TODO: replace with JSON schema validation
if not content:
# testcase file content is empty
err_msg = u"Testcase file content is empty: {}".format(file_path)
logger.log_error(err_msg)
raise exceptions.FileFormatError(err_msg)
elif not isinstance(content, (list, dict)):
# testcase file content does not match testcase format
err_msg = u"Testcase file content format invalid: {}".format(file_path)
logger.log_error(err_msg)
raise exceptions.FileFormatError(err_msg)
def load_yaml_file(yaml_file):
""" load yaml file and check file content format
"""
with io.open(yaml_file, 'r', encoding='utf-8') as stream:
yaml_content = yaml.load(stream, Loader=yaml.SafeLoader)
_check_format(yaml_file, yaml_content)
return yaml_content
def load_json_file(json_file):
""" load json file and check file content format
"""
with io.open(json_file, encoding='utf-8') as data_file:
try:
json_content = json.load(data_file)
except exceptions.JSONDecodeError:
err_msg = u"JSONDecodeError: JSON file format error: {}".format(json_file)
logger.log_error(err_msg)
raise exceptions.FileFormatError(err_msg)
_check_format(json_file, json_content)
return json_content
def load_csv_file(csv_file):
""" load csv file and check file content format
@param
csv_file: csv file path
e.g. csv file content:
username,password
test1,111111
test2,222222
test3,333333
@return
list of parameter, each parameter is in dict format
e.g.
[
{'username': 'test1', 'password': '<PASSWORD>'},
{'username': 'test2', 'password': '<PASSWORD>'},
{'username': 'test3', 'password': '<PASSWORD>'}
]
"""
csv_content_list = []
with io.open(csv_file, encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
csv_content_list.append(row)
return csv_content_list
def load_file(file_path):
if not os.path.isfile(file_path):
raise exceptions.FileNotFound("{} does not exist.".format(file_path))
file_suffix = os.path.splitext(file_path)[1].lower()
if file_suffix == '.json':
return load_json_file(file_path)
elif file_suffix in ['.yaml', '.yml']:
return load_yaml_file(file_path)
elif file_suffix == ".csv":
return load_csv_file(file_path)
else:
# '' or other suffix
err_msg = u"Unsupported file format: {}".format(file_path)
logger.log_warning(err_msg)
return []
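# Illustrative usage sketch (not part of the original httprunner module); the file
# names below are hypothetical.
def _example_load_file_dispatch():
    # load_file() dispatches on the file suffix, so CSV parameter files and YAML/JSON
    # testcase files share a single entry point.
    parameters = load_file("data/accounts.csv")    # list of dicts, one per CSV row
    testcase = load_file("testcases/login.yml")    # parsed YAML content (list or dict)
    return parameters, testcase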
def load_folder_files(folder_path, recursive=True):
""" load folder path, return all files endswith yml/yaml/json in list.
Args:
folder_path (str): specified folder path to load
recursive (bool): load files recursively if True
Returns:
list: file paths ending with yml/yaml/json
"""
if isinstance(folder_path, (list, set)):
files = []
for path in set(folder_path):
files.extend(load_folder_files(path, recursive))
return files
if not os.path.exists(folder_path):
return []
file_list = []
for dirpath, dirnames, filenames in os.walk(folder_path):
filenames_list = []
for filename in filenames:
if not filename.endswith(('.yml', '.yaml', '.json')):
continue
filenames_list.append(filename)
for filename in filenames_list:
file_path = os.path.join(dirpath, filename)
file_list.append(file_path)
if not recursive:
break
return file_list
def load_dot_env_file():
""" load .env file, .env file should be located in project working directory.
Returns:
dict: environment variables mapping
{
"UserName": "debugtalk",
"Password": "<PASSWORD>",
"PROJECT_KEY": "ABCDEFGH"
}
Raises:
exceptions.FileFormatError: If env file format is invalid.
"""
path = os.path.join(project_working_directory, ".env")
if not os.path.isfile(path):
logger.log_debug(".env file not exist in : {}".format(project_working_directory))
return {}
logger.log_info("Loading environment variables from {}".format(path))
env_variables_mapping = {}
with io.open(path, 'r', encoding='utf-8') as fp:
for line in fp:
if "=" in line:
variable, value = line.split("=", 1)
elif ":" in line:
variable, value = line.split(":", 1)
else:
raise exceptions.FileFormatError(".env format error")
env_variables_mapping[variable.strip()] = value.strip()
project_mapping["env"] = env_variables_mapping
utils.set_os_environ(env_variables_mapping)
return env_variables_mapping
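# Illustrative sketch (not part of the original module): a .env file in the project
# working directory is parsed line by line, accepting either "KEY=value" or "KEY: value"
# pairs. With hypothetical contents such as
#
#     UserName=leo
#     Password: secret
#
# load_dot_env_file() returns {"UserName": "leo", "Password": "secret"}, stores the
# mapping in project_mapping["env"] and exports it via utils.set_os_environ().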
def locate_file(start_path, file_name):
""" locate filename and return file path.
Searching proceeds recursively upward until the current working directory is reached.
Args:
start_path (str): path to start searching from; may be a file path or a directory path
file_name (str): name of the file to locate
Returns:
str: located file path. None if file not found.
Raises:
exceptions.FileNotFound: If failed to locate file.
"""
if os.path.isfile(start_path):
start_dir_path = os.path.dirname(start_path)
elif os.path.isdir(start_path):
start_dir_path = start_path
else:
raise exceptions.FileNotFound("invalid path: {}".format(start_path))
file_path = os.path.join(start_dir_path, file_name)
if os.path.isfile(file_path):
return file_path
# current working directory
if os.path.abspath(start_dir_path) in [os.getcwd(), os.path.abspath(os.sep)]:
raise exceptions.FileNotFound("{} not found in {}".format(file_name, start_path))
# locate recursive upward
return locate_file(os.path.dirname(start_dir_path), file_name)
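# Illustrative usage sketch (not part of the original module); the start path is
# hypothetical.
def _example_locate_debugtalk(start_path="tests/data/demo_testcase.yml"):
    # Walk upward from start_path until debugtalk.py is found; exceptions.FileNotFound
    # propagates if the search reaches the current working directory without a match.
    return locate_file(start_path, "debugtalk.py")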
###############################################################################
## debugtalk.py module loader
###############################################################################
def load_python_module(module):
""" load python module.
Args:
module: python module
Returns:
dict: variables and functions mapping for specified python module
{
"variables": {},
"functions": {}
}
"""
debugtalk_module = {
"variables": {},
"functions": {}
}
for name, item in vars(module).items():
if validator.is_function((name, item)):
debugtalk_module["functions"][name] = item
elif validator.is_variable((name, item)):
debugtalk_module["variables"][name] = item
else:
pass
return debugtalk_module
def load_builtin_module():
""" load built_in module
"""
built_in_module = load_python_module(built_in)
project_mapping["debugtalk"] = built_in_module
def load_debugtalk_module():
""" load project debugtalk.py module and merge with builtin module.
debugtalk.py should be located in project working directory.
variables and functions mapping for debugtalk.py
{
"variables": {},
"functions": {}
}
"""
# load debugtalk.py module
imported_module = importlib.import_module("debugtalk")
debugtalk_module = load_python_module(imported_module)
# override built_in module with debugtalk.py module
project_mapping["debugtalk"]["variables"].update(debugtalk_module["variables"])
project_mapping["debugtalk"]["functions"].update(debugtalk_module["functions"])
def get_module_item(module_mapping, item_type, item_name):
""" get expected function or variable from module mapping.
Args:
module_mapping(dict): module mapping with variables and functions.
{
"variables": {},
"functions": {}
}
item_type(str): "functions" or "variables"
item_name(str): function name or variable name
Returns:
object: specified variable or function object.
Raises:
exceptions.FunctionNotFound: If specified function not found in module mapping
exceptions.VariableNotFound: If specified variable not found in module mapping
"""
try:
return module_mapping[item_type][item_name]
except KeyError:
err_msg = "{} not found in debugtalk.py module!\n".format(item_name)
err_msg += "module mapping: {}".format(module_mapping)
if item_type == "functions":
raise exceptions.FunctionNotFound(err_msg)
else:
raise exceptions.VariableNotFound(err_msg)
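# Illustrative usage sketch (not part of the original module): assumes
# load_builtin_module()/load_debugtalk_module() have already populated
# project_mapping["debugtalk"]; the function name is hypothetical.
def _example_get_debugtalk_function(func_name="gen_random_string"):
    module_mapping = project_mapping["debugtalk"]
    return get_module_item(module_mapping, "functions", func_name)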
###############################################################################
## testcase loader
###############################################################################
def _load_test_file(file_path):
""" load testcase file or testsuite file
Args:
file_path (str): absolute valid file path. file_path should be in the following format:
[
{
"config": {
"name": "",
"def": "suite_order()",
"request": {}
}
},
{
"test": {
"name": "add product to cart",
"api": "api_add_cart()",
"validate": []
}
},
{
"test": {
"name": "add product to cart",
"suite": "create_and_check()",
"validate": []
}
},
{
"test": {
"name": "checkout cart",
"request": {},
"validate": []
}
}
]
Returns:
dict: testcase dict
{
"config": {},
"teststeps": [teststep11, teststep12]
}
"""
testcase = {
"config": {},
"teststeps": []
}
for item in load_file(file_path):
# TODO: add json schema validation
if not isinstance(item, dict) or len(item) != 1:
raise exceptions.FileFormatError("Testcase format error: {}".format(file_path))
key, test_block = item.popitem()
if not isinstance(test_block, dict):
raise exceptions.FileFormatError("Testcase format error: {}".format(file_path))
if key == "config":
testcase["config"].update(test_block)
elif key == "test":
def extend_api_definition(block):
ref_call = block["api"]
def_block = _get_block_by_name(ref_call, "def-api")
_extend_block(block, def_block)
# reference api
if "api" in test_block:
extend_api_definition(test_block)
testcase["teststeps"].append(test_block)
# reference testcase
elif "suite" in test_block: # TODO: replace suite with testcase
ref_call = test_block["suite"]
block = _get_block_by_name(ref_call, "def-testcase")
# TODO: bugfix lost block config variables
for teststep in block["teststeps"]:
if "api" in teststep:
extend_api_definition(teststep)
testcase["teststeps"].append(teststep)
# define directly
else:
testcase["teststeps"].append(test_block)
else:
logger.log_warning(
"unexpected block key: {}. block key should only be 'config' or 'test'.".format(key)
)
return testcase
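# Illustrative sketch (not part of the original module): a minimal testcase file that
# _load_test_file() can consume; names and URLs are hypothetical.
#
#     - config:
#         name: "demo testcase"
#         request:
#             base_url: "http://example.com"
#     - test:
#         name: "get token"
#         request:
#             url: /api/token
#             method: POST
#         validate:
#             - {"eq": ["status_code", 200]}
#
# The loader returns {"config": {...}, "teststeps": [<the "test" block>]}.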
def _get_block_by_name(ref_call, ref_type):
""" get test content by reference name.
Args:
ref_call (str): call function.
e.g. api_v1_Account_Login_POST($UserName, $Password)
ref_type (enum): "def-api" or "def-testcase"
Returns:
dict: api/testcase definition.
Raises:
exceptions.ParamsError: call args number is not equal to defined args number.
"""
function_meta = parser.parse_function(ref_call)
func_name = function_meta["func_name"]
call_args = function_meta["args"]
block = _get_test_definition(func_name, ref_type)
def_args = block.get("function_meta", {}).get("args", [])
if len(call_args) != len(def_args):
err_msg = "{}: call args number is not equal to defined args number!\n".format(func_name)
err_msg += "defined args: {}\n".format(def_args)
err_msg += "reference args: {}".format(call_args)
logger.log_error(err_msg)
raise exceptions.ParamsError(err_msg)
args_mapping = {}
for index, item in enumerate(def_args):
if call_args[index] == item:
continue
args_mapping[item] = call_args[index]
if args_mapping:
block = parser.substitute_variables(block, args_mapping)
return block
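# Illustrative sketch (not part of the original module): assuming an api definition
# named api_v1_Account_Login_POST was registered with defined args
# ($UserName, $Password), a reference call such as
#
#     api_v1_Account_Login_POST(user1, 123456)
#
# is parsed into func_name/args, matched against the definition, and the mapping
# {$UserName: user1, $Password: 123456} is substituted into the returned block.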
def _get_test_definition(name, ref_type):
""" get expected api or testcase.
Args:
name (str): api or testcase name
ref_type (enum): "def-api" or "def-testcase"
Returns:
dict: expected api/testcase info if found.
Raises:
exceptions.ApiNotFound: api not found
exceptions.TestcaseNotFound: testcase not found
"""
block = project_mapping.get(ref_type, {}).get(name)
if not block:
err_msg = "{} not found!".format(name)
if ref_type == "def-api":
raise exceptions.ApiNotFound(err_msg)
else:
# ref_type == "def-testcase":
raise exceptions.TestcaseNotFound(err_msg)
return block
def _extend_block(ref_block, def_block):
""" extend ref_block with def_block.
Args:
def_block (dict): api definition dict.
ref_block (dict): reference block
Returns:
dict: extended reference block.
Examples:
>>> def_block = {
"name": "get token 1",
"request": {...},
"validate": [{'eq': ['status_code', 200]}]
}
>>> ref_block = {
"name": "get token 2",
"extract": [{"token": "content.token"}],
"validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}]
}
>>> _extend_block(ref_block, def_block)
{
"name": "get token 2",
"request": {...},
"extract": [{"token": "content.token"}],
"validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}]
}
"""
# TODO: override variables
def_validators = def_block.get("validate") or def_block.get("validators", [])
ref_validators = ref_block.get("validate") or ref_block.get("validators", [])
def_extrators = def_block.get("extract") \
or def_block.get("extractors", [])
'.' + 'testMemeValidity'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "Meme_Validity.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close()
n = 0
memeValid = False
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
try:
memeToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
memeValidReport = memeToTest.validate([])
memeValid = memeValidReport[0]
if expectedTestResult != memeValid:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "testkey %s has an unexpected validity status" %(memeToTest.path.fullTemplatePath)])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(memeValid)
expectedResult = stringArray[1]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testMemeSingleton():
method = moduleName + '.' + 'testMemeSingleton'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "Meme_Singleton.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
testResult = False
try:
mmToTest = Graph.templateRepository.templates[stringArray[0]]
if expectedTestResult == mmToTest.isSingleton:
if mmToTest.entityUUID is not None:
testResult = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "meme %s has no deployed entity" %(stringArray[0])])
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "meme %s has an unexpected singleton status" %(stringArray[0])])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[1]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase1(phaseName = 'testEntityPhase1', fName = "Entity_Phase1.atest"):
''' Create the entity from the meme and add it to the entity repo.
Retrieve the entity.
Check to see if it has the properties it is supposed to,
if the type is correct and if the value is correct.
Entity Phase 5 also uses this function
'''
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID = Graph.api.createEntityFromMeme(stringArray[0])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Entity UUID = %s" %(entityID)])
propTypeCorrect = False
propValueCorrect = False
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
hasProp = Graph.api.getEntityHasProperty(entityID, stringArray[1])
if hasProp == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entity from meme %s does not have property %s" %(entityID, stringArray[1])])
else:
propType = Graph.api.getEntityPropertyType(entityID, stringArray[1])
if stringArray[2] == propType:
propTypeCorrect = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong type. Expected %s. Got %s" %(stringArray[1], entityID, stringArray[2], propType)])
propValue = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
if propType == 'Boolean':
expValue = False
if stringArray[3].lower() == "true":
expValue = True
if propValue == expValue:
propValueCorrect = True
elif propType == 'Decimal':
expValue = decimal.Decimal(stringArray[3])
if propValue == expValue:
propValueCorrect = True
elif propType == 'Integer':
expValue = int(stringArray[3])
if propValue == expValue:
propValueCorrect = True
else:
if propValue == stringArray[3]:
propValueCorrect = True
if propValueCorrect == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong value. Expected %s. Got %s" %(stringArray[1], stringArray[0], stringArray[3], propValue)])
if (propValueCorrect == True) and (propTypeCorrect == True) and (hasProp == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[4]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
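# Illustrative sketch (not part of the original test module): each line of the .atest
# file consumed by testEntityPhase1 appears to carry five whitespace-separated fields,
# e.g. (hypothetical values)
#
#     SomeModule.SomeMeme propName String expectedValue TRUE
#
# i.e. meme path, property name, expected property type, expected property value and
# the expected overall test result.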
def testEntityPhase1_1(phaseName = 'testEntityPhase1_1', fName = "Entity_Phase1.atest"):
''' a repeat of testEntityPhase1, but using the Python script interface instead of going directly against Graph.api
Tests the following script commands:
createEntityFromMeme
getEntityHasProperty
getEntityPropertyType
getEntityPropertyValue
'''
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
#entityID = Graph.api.createEntityFromMeme(stringArray[0])
entityID = api.createEntityFromMeme(stringArray[0])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Entity UUID = %s" %(entityID)])
propTypeCorrect = False
propValueCorrect = False
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
#hasProp = Graph.api.getEntityHasProperty(entityID, stringArray[1])
hasProp = api.getEntityHasProperty(entityID, stringArray[1])
if hasProp == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entity from meme %s does not have property %s" %(entityID, stringArray[1])])
else:
#propType = Graph.api.getEntityPropertyType(entityID, stringArray[1])
propType = api.getEntityPropertyType(entityID, stringArray[1])
if stringArray[2] == propType:
propTypeCorrect = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong type. Expected %s. Got %s" %(stringArray[1], entityID, stringArray[2], propType)])
#propValue = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
propValue = api.getEntityPropertyValue(entityID, stringArray[1])
if propType == 'Boolean':
expValue = False
if stringArray[3].lower() == "true":
expValue = True
if propValue == expValue:
propValueCorrect = True
elif propType == 'Decimal':
expValue = decimal.Decimal(stringArray[3])
if propValue == expValue:
propValueCorrect = True
elif propType == 'Integer':
expValue = int(stringArray[3])
if propValue == expValue:
propValueCorrect = True
else:
if propValue == stringArray[3]:
propValueCorrect = True
if propValueCorrect == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong value. Expected %s. Got %s" %(stringArray[1], stringArray[0], stringArray[3], propValue)])
if (propValueCorrect == True) and (propTypeCorrect == True) and (hasProp == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[4]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase2(testPhase = 'testEntityPhase2', fileName = 'Entity_Phase2.atest'):
''' Change the values of the various properties.
Can we change the value to the desired value and are constraints working? '''
method = moduleName + '.' + testPhase
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, fileName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID = Graph.api.createEntityFromMeme(stringArray[0])
Graph.api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
getter = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
propType = Graph.api.getEntityPropertyType(entityID, stringArray[1])
# reformat the expected result from a unicode string to the type expected by the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ConnectionArgs', 'Connection']
@pulumi.input_type
class ConnectionArgs:
def __init__(__self__, *,
connection_id: pulumi.Input[str],
connector_version: pulumi.Input[str],
auth_config: Optional[pulumi.Input['AuthConfigArgs']] = None,
config_variables: Optional[pulumi.Input[Sequence[pulumi.Input['ConfigVariableArgs']]]] = None,
description: Optional[pulumi.Input[str]] = None,
inactive: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
lock_config: Optional[pulumi.Input['LockConfigArgs']] = None,
project: Optional[pulumi.Input[str]] = None,
service_account: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Connection resource.
:param pulumi.Input[str] connector_version: Connector version on which the connection is created. The format is: projects/*/locations/global/providers/*/connectors/*/versions/*
:param pulumi.Input['AuthConfigArgs'] auth_config: Optional. Configuration for establishing the connection's authentication with an external system.
:param pulumi.Input[Sequence[pulumi.Input['ConfigVariableArgs']]] config_variables: Optional. Configuration for configuring the connection with an external system.
:param pulumi.Input[str] description: Optional. Description of the resource.
:param pulumi.Input[bool] inactive: Optional. Inactive indicates the connection is active to use or not.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user-provided metadata. Refer to cloud documentation on labels for more details. https://cloud.google.com/compute/docs/labeling-resources
:param pulumi.Input['LockConfigArgs'] lock_config: Optional. Configuration that indicates whether or not the Connection can be edited.
:param pulumi.Input[str] service_account: Optional. Service account needed for runtime plane to access GCP resources.
"""
pulumi.set(__self__, "connection_id", connection_id)
pulumi.set(__self__, "connector_version", connector_version)
if auth_config is not None:
pulumi.set(__self__, "auth_config", auth_config)
if config_variables is not None:
pulumi.set(__self__, "config_variables", config_variables)
if description is not None:
pulumi.set(__self__, "description", description)
if inactive is not None:
pulumi.set(__self__, "inactive", inactive)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if location is not None:
pulumi.set(__self__, "location", location)
if lock_config is not None:
pulumi.set(__self__, "lock_config", lock_config)
if project is not None:
pulumi.set(__self__, "project", project)
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
@property
@pulumi.getter(name="connectionId")
def connection_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "connection_id")
@connection_id.setter
def connection_id(self, value: pulumi.Input[str]):
pulumi.set(self, "connection_id", value)
@property
@pulumi.getter(name="connectorVersion")
def connector_version(self) -> pulumi.Input[str]:
"""
Connector version on which the connection is created. The format is: projects/*/locations/global/providers/*/connectors/*/versions/*
"""
return pulumi.get(self, "connector_version")
@connector_version.setter
def connector_version(self, value: pulumi.Input[str]):
pulumi.set(self, "connector_version", value)
@property
@pulumi.getter(name="authConfig")
def auth_config(self) -> Optional[pulumi.Input['AuthConfigArgs']]:
"""
Optional. Configuration for establishing the connection's authentication with an external system.
"""
return pulumi.get(self, "auth_config")
@auth_config.setter
def auth_config(self, value: Optional[pulumi.Input['AuthConfigArgs']]):
pulumi.set(self, "auth_config", value)
@property
@pulumi.getter(name="configVariables")
def config_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConfigVariableArgs']]]]:
"""
Optional. Configuration for configuring the connection with an external system.
"""
return pulumi.get(self, "config_variables")
@config_variables.setter
def config_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConfigVariableArgs']]]]):
pulumi.set(self, "config_variables", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Description of the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def inactive(self) -> Optional[pulumi.Input[bool]]:
"""
Optional. Inactive indicates the connection is active to use or not.
"""
return pulumi.get(self, "inactive")
@inactive.setter
def inactive(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "inactive", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional. Resource labels to represent user-provided metadata. Refer to cloud documentation on labels for more details. https://cloud.google.com/compute/docs/labeling-resources
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="lockConfig")
def lock_config(self) -> Optional[pulumi.Input['LockConfigArgs']]:
"""
Optional. Configuration that indicates whether or not the Connection can be edited.
"""
return pulumi.get(self, "lock_config")
@lock_config.setter
def lock_config(self, value: Optional[pulumi.Input['LockConfigArgs']]):
pulumi.set(self, "lock_config", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Service account needed for runtime plane to access GCP resources.
"""
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account", value)
class Connection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auth_config: Optional[pulumi.Input[pulumi.InputType['AuthConfigArgs']]] = None,
config_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigVariableArgs']]]]] = None,
connection_id: Optional[pulumi.Input[str]] = None,
connector_version: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
inactive: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
lock_config: Optional[pulumi.Input[pulumi.InputType['LockConfigArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
service_account: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a new Connection in a given project and location.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AuthConfigArgs']] auth_config: Optional. Configuration for establishing the connection's authentication with an external system.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigVariableArgs']]]] config_variables: Optional. Configuration for configuring the connection with an external system.
:param pulumi.Input[str] connector_version: Connector version on which the connection is created. The format is: projects/*/locations/global/providers/*/connectors/*/versions/*
:param pulumi.Input[str] description: Optional. Description of the resource.
:param pulumi.Input[bool] inactive: Optional. Inactive indicates the connection is active to use or not.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user-provided metadata. Refer to cloud documentation on labels for more details. https://cloud.google.com/compute/docs/labeling-resources
:param pulumi.Input[pulumi.InputType['LockConfigArgs']] lock_config: Optional. Configuration that indicates whether or not the Connection can be edited.
:param pulumi.Input[str] service_account: Optional. Service account needed for runtime plane to access GCP resources.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new Connection in a given project and location.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param ConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auth_config: Optional[pulumi.Input[pulumi.InputType['AuthConfigArgs']]] = None,
config_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigVariableArgs']]]]] = None,
connection_id: Optional[pulumi.Input[str]] = None,
connector_version: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
inactive: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
lock_config: Optional[pulumi.Input[pulumi.InputType['LockConfigArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
service_account: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ConnectionArgs.__new__(ConnectionArgs)
__props__.__dict__["auth_config"] = auth_config
__props__.__dict__["config_variables"] = config_variables
if connection_id is None and not opts.urn:
raise TypeError("Missing required property 'connection_id'")
__props__.__dict__["connection_id"] = connection_id
if connector_version is None and not opts.urn:
raise TypeError("Missing required property 'connector_version'")
__props__.__dict__["connector_version"] = connector_version
__props__.__dict__["description"] = description
__props__.__dict__["inactive"] = inactive
__props__.__dict__["labels"] = labels
__props__.__dict__["location"] = location
__props__.__dict__["lock_config"] = lock_config
__props__.__dict__["project"] = project
__props__.__dict__["service_account"] = service_account
__props__.__dict__["create_time"] = None
__props__.__dict__["egress_backends"] = None
__props__.__dict__["envoy_image_location"] = None
__props__.__dict__["image_location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["service_directory"] = None
__props__.__dict__["status"] = None
__props__.__dict__["update_time"] = None
super(Connection, __self__).__init__(
'google-native:connectors/v1:Connection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Connection':
"""
Get an existing Connection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ConnectionArgs.__new__(ConnectionArgs)
__props__.__dict__["auth_config"] = None
__props__.__dict__["config_variables"] = None
__props__.__dict__["connector_version"] = None
__props__.__dict__["create_time"] = None
__props__.__dict__["description"] = None
__props__.__dict__["egress_backends"] = None
__props__.__dict__["envoy_image_location"] = None
__props__.__dict__["image_location"] = None
__props__.__dict__["inactive"] = None
__props__.__dict__["labels"] = None
__props__.__dict__["lock_config"] = None
__props__.__dict__["name"] = None
__props__.__dict__["service_account"] = None
__props__.__dict__["service_directory"] = None
__props__.__dict__["status"] = None
__props__.__dict__["update_time"] = None
return Connection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authConfig")
def auth_config(self) -> pulumi.Output['outputs.AuthConfigResponse']:
"""
Optional. Configuration for establishing the connection's authentication with an external system.
"""
return pulumi.get(self, "auth_config")
@property
@pulumi.getter(name="configVariables")
def config_variables(self) -> pulumi.Output[Sequence['outputs.ConfigVariableResponse']]:
"""
Optional. Configuration for configuring the connection with an external system.
"""
return pulumi.get(self, "config_variables")
@property
@pulumi.getter(name="connectorVersion")
def connector_version(self) -> pulumi.Output[str]:
"""
Connector version on which the connection is created. The format is: projects/*/locations/global/providers/*/connectors/*/versions/*
"""
return pulumi.get(self, "connector_version")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
#!/usr/bin/env python
# reference: c041828_ISO_IEC_14496-12_2005(E).pdf
##################################################
# reader and writer
##################################################
import struct
from io import BytesIO
def skip(stream, n):
stream.seek(stream.tell() + n)
def skip_zeros(stream, n):
assert stream.read(n) == b'\x00' * n
def read_int(stream):
return struct.unpack('>i', stream.read(4))[0]
def read_uint(stream):
return struct.unpack('>I', stream.read(4))[0]
def write_uint(stream, n):
stream.write(struct.pack('>I', n))
def write_ulong(stream, n):
stream.write(struct.pack('>Q', n))
def read_ushort(stream):
return struct.unpack('>H', stream.read(2))[0]
def read_ulong(stream):
return struct.unpack('>Q', stream.read(8))[0]
def read_byte(stream):
return ord(stream.read(1))
def copy_stream(source, target, n):
buffer_size = 1024 * 1024
while n > 0:
to_read = min(buffer_size, n)
s = source.read(to_read)
assert len(s) == to_read, 'not enough data'
target.write(s)
n -= to_read
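# Illustrative sketch (not part of the original script): every ISO-BMFF box starts with
# a 32-bit big-endian size followed by a 4-byte type tag, which is exactly what the
# helpers above decode.
def _example_read_box_header(stream):
    size = read_uint(stream)    # total box size in bytes, including the 8-byte header
    box_type = stream.read(4)   # e.g. b'ftyp', b'moov', b'mdat'
    return size, box_type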
class Atom:
def __init__(self, type, size, body):
assert len(type) == 4
self.type = type
self.size = size
self.body = body
def __str__(self):
# return '<Atom(%s):%s>' % (self.type, repr(self.body))
return '<Atom(%s):%s>' % (self.type, '')
def __repr__(self):
return str(self)
def write1(self, stream):
write_uint(stream, self.size)
stream.write(self.type)
def write(self, stream):
assert type(self.body) == bytes, '%s: %s' % (self.type, type(self.body))
assert self.size == 8 + len(self.body)
self.write1(stream)
stream.write(self.body)
def calsize(self):
return self.size
class CompositeAtom(Atom):
def __init__(self, type, size, body):
assert isinstance(body, list)
Atom.__init__(self, type, size, body)
def write(self, stream):
assert type(self.body) == list
self.write1(stream)
for atom in self.body:
atom.write(stream)
def calsize(self):
self.size = 8 + sum([atom.calsize() for atom in self.body])
return self.size
def get1(self, k):
for a in self.body:
if a.type == k:
return a
else:
raise Exception('atom not found: ' + k)
def get(self, *keys):
atom = self
for k in keys:
atom = atom.get1(k)
return atom
def get_all(self, k):
return list(filter(lambda x: x.type == k, self.body))
class VariableAtom(Atom):
def __init__(self, type, size, body, variables):
assert isinstance(body, bytes)
Atom.__init__(self, type, size, body)
self.variables = variables
def write(self, stream):
self.write1(stream)
i = 0
n = 0
for name, offset, value, bsize in self.variables:
stream.write(self.body[i:offset])
if bsize == 4:
write_uint(stream, value)
elif bsize == 8:
write_ulong(stream, value)
else:
raise NotImplementedError()
n += offset - i + bsize
i = offset + bsize
stream.write(self.body[i:])
n += len(self.body) - i
assert n == len(self.body)
def get(self, k):
for v in self.variables:
if v[0] == k:
return v[2]
else:
raise Exception('field not found: ' + k)
def set(self, k, v):
for i in range(len(self.variables)):
variable = self.variables[i]
if variable[0] == k:
self.variables[i] = (k, variable[1], v, variable[3])
break
else:
raise Exception('field not found: ' + k)
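# Illustrative sketch (not part of the original script): a VariableAtom records where a
# named field (e.g. 'duration') sits inside its raw body, so the value can be patched
# before the atom is written back out, e.g.
#
#     mvhd = read_mvhd(stream, size, size - 8, b'mvhd')
#     mvhd.set('duration', new_duration)    # overwrite the stored duration
#     mvhd.write(out_stream)                # body is re-emitted with the new value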
def read_raw(stream, size, left, type):
assert size == left + 8
body = stream.read(left)
return Atom(type, size, body)
def read_udta(stream, size, left, type):
assert size == left + 8
body = stream.read(left)
class Udta(Atom):
def write(self, stream):
return
def calsize(self):
return 0
return Udta(type, size, body)
def read_body_stream(stream, left):
body = stream.read(left)
assert len(body) == left
return body, BytesIO(body)
def read_full_atom(stream):
value = read_uint(stream)
version = value >> 24
flags = value & 0xffffff
assert version == 0
return value
def read_full_atom2(stream):
value = read_uint(stream)
version = value >> 24
flags = value & 0xffffff
return version, value
def read_mvhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
# new Date(movieTime * 1000 - 2082850791998L);
creation_time = read_uint(stream)
modification_time = read_uint(stream)
time_scale = read_uint(stream)
duration = read_uint(stream)
left -= 16
qt_preferred_rate = read_uint(stream)
qt_preferred_volume = read_ushort(stream)
assert stream.read(10) == b'\x00' * 10
qt_matrixA = read_uint(stream)
qt_matrixB = read_uint(stream)
qt_matrixU = read_uint(stream)
qt_matrixC = read_uint(stream)
qt_matrixD = read_uint(stream)
qt_matrixV = read_uint(stream)
qt_matrixX = read_uint(stream)
qt_matrixY = read_uint(stream)
qt_matrixW = read_uint(stream)
qt_previewTime = read_uint(stream)
qt_previewDuration = read_uint(stream)
qt_posterTime = read_uint(stream)
qt_selectionTime = read_uint(stream)
qt_selectionDuration = read_uint(stream)
qt_currentTime = read_uint(stream)
nextTrackID = read_uint(stream)
left -= 80
assert left == 0
return VariableAtom(b'mvhd', size, body, [('duration', 16, duration, 4)])
def read_tkhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
# new Date(movieTime * 1000 - 2082850791998L);
creation_time = read_uint(stream)
modification_time = read_uint(stream)
track_id = read_uint(stream)
assert stream.read(4) == b'\x00' * 4
duration = read_uint(stream)
left -= 20
assert stream.read(8) == b'\x00' * 8
qt_layer = read_ushort(stream)
qt_alternate_group = read_ushort(stream)
qt_volume = read_ushort(stream)
assert stream.read(2) == b'\x00\x00'
qt_matrixA = read_uint(stream)
qt_matrixB = read_uint(stream)
qt_matrixU = read_uint(stream)
qt_matrixC = read_uint(stream)
qt_matrixD = read_uint(stream)
qt_matrixV = read_uint(stream)
qt_matrixX = read_uint(stream)
qt_matrixY = read_uint(stream)
qt_matrixW = read_uint(stream)
qt_track_width = read_uint(stream)
width = qt_track_width >> 16
qt_track_height = read_uint(stream)
height = qt_track_height >> 16
left -= 60
assert left == 0
return VariableAtom(b'tkhd', size, body, [('duration', 20, duration, 4)])
def read_mdhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
ver, value = read_full_atom2(stream)
left -= 4
if ver == 1:
creation_time = read_ulong(stream)
modification_time = read_ulong(stream)
time_scale = read_uint(stream)
duration = read_ulong(stream)
var = [('duration', 24, duration, 8)]
left -= 28
else:
assert ver == 0, "ver=%d" % ver
creation_time = read_uint(stream)
modification_time = read_uint(stream)
time_scale = read_uint(stream)
duration = read_uint(stream)
var = [('duration', 16, duration, 4)]
left -= 16
packed_language = read_ushort(stream)
qt_quality = read_ushort(stream)
left -= 4
assert left == 0
return VariableAtom(b'mdhd', size, body, var)
def read_hdlr(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
qt_component_type = read_uint(stream)
handler_type = read_uint(stream)
qt_component_manufacturer = read_uint(stream)
qt_component_flags = read_uint(stream)
qt_component_flags_mask = read_uint(stream)
left -= 20
track_name = stream.read(left)
# assert track_name[-1] == b'\x00'
return Atom(b'hdlr', size, body)
def read_vmhd(stream, size, left, type):
body, stream = read_body_stream(stream, left)
value = read_full_atom(stream)
left -= 4
assert left == 8
graphic_mode = read_ushort(stream)
op_color_red = read_ushort(stream)
op_color_green = read_ushort(stream)
op_color_blue = read_ushort(stream)
return Atom(b'vmhd', size, body)
def read_stsd(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
children = []
for i in range(entry_count):
atom = read_atom(stream)
children.append(atom)
left -= atom.size
assert left == 0
# return Atom('stsd', size, children)
class stsd_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for atom in self.body[1]:
atom.write(stream)
def calsize(self):
oldsize = self.size # TODO: remove
self.size = 8 + 4 + 4 + sum([atom.calsize() for atom in self.body[1]])
assert oldsize == self.size, '%s: %d, %d' % (self.type, oldsize, self.size) # TODO: remove
return self.size
return stsd_atom(b'stsd', size, (value, children))
def read_avc1(stream, size, left, type):
body, stream = read_body_stream(stream, left)
skip_zeros(stream, 6)
data_reference_index = read_ushort(stream)
skip_zeros(stream, 2)
skip_zeros(stream, 2)
skip_zeros(stream, 12)
width = read_ushort(stream)
height = read_ushort(stream)
horizontal_rez = read_uint(stream) >> 16
vertical_rez = read_uint(stream) >> 16
assert stream.read(4) == b'\x00' * 4
frame_count = read_ushort(stream)
string_len = read_byte(stream)
compressor_name = stream.read(31)
depth = read_ushort(stream)
assert stream.read(2) == b'\xff\xff'
left -= 78
child = read_atom(stream)
assert child.type in (b'avcC', b'pasp'), 'if the sub atom is not avcC or pasp (actual %s), you should not cache raw body' % child.type
left -= child.size
stream.read(left) # XXX
return Atom(b'avc1', size, body)
def read_avcC(stream, size, left, type):
stream.read(left)
return Atom(b'avcC', size, None)
def read_stts(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
# assert entry_count == 1
left -= 4
samples = []
for i in range(entry_count):
sample_count = read_uint(stream)
sample_duration = read_uint(stream)
samples.append((sample_count, sample_duration))
left -= 8
assert left == 0
# return Atom('stts', size, None)
class stts_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for sample_count, sample_duration in self.body[1]:
write_uint(stream, sample_count)
write_uint(stream, sample_duration)
def calsize(self):
# oldsize = self.size # TODO: remove
self.size = 8 + 4 + 4 + len(self.body[1]) * 8
# assert oldsize == self.size, '%s: %d, %d' % (self.type, oldsize, self.size) # TODO: remove
return self.size
return stts_atom(b'stts', size, (value, samples))
def read_stss(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
samples = []
for i in range(entry_count):
sample = read_uint(stream)
samples.append(sample)
left -= 4
assert left == 0
# return Atom('stss', size, None)
class stss_atom(Atom):
def __init__(self, type, size, body):
Atom.__init__(self, type, size, body)
def write(self, stream):
self.write1(stream)
write_uint(stream, self.body[0])
write_uint(stream, len(self.body[1]))
for sample in self.body[1]:
write_uint(stream, sample)
def calsize(self):
self.size = 8 + 4 + 4 + len(self.body[1]) * 4
return self.size
return stss_atom(b'stss', size, (value, samples))
def read_stsc(stream, size, left, type):
value = read_full_atom(stream)
left -= 4
entry_count = read_uint(stream)
left -= 4
<reponame>dgasmith/EEX
"""
Contains the DataLayer class (name in progress) which takes and reads various pieces of data
"""
import copy
import json
import os
import numpy as np
import pandas as pd
from . import energy_eval
from . import filelayer
from . import metadata
from . import units
from . import utility
from . import testing
from . import nb_converter
APC_DICT = metadata.atom_property_to_column
class DataLayer(object):
def __init__(self, name, store_location=None, save_data=False, backend="Memory"):
"""
Initializes the DataLayer class
Parameters
----------
name : str
The name of the energy expression stored
store_location : {None, str}, optional
The location to store the temporary data during the translation. Defaults to the current working directory.
save_data : {False, True}, optional
Decides whether to delete the store data upon destruction of the DataLayer object.
backend : {"HDF5", "memory"}, optional
Storage backend for the energy expression.
"""
# Set the state
self.name = name
# Build the store
self.store_location = store_location
if self.store_location is None:
self.store_location = os.getcwd()
self.store = filelayer.build_store(backend, self.name, self.store_location, save_data)
# Setup empty term holder
self._terms = {order: {} for order in [2, 3, 4]}
self._term_count = {order: {"total": 0} for order in [2, 3, 4]}
# Setup atom holders
self._atom_metadata = {}
self._atom_counts = {}
# Create structure of _atom_metadata dictionary
for k, v in metadata.atom_metadata.items():
if not v["unique"]:
self._atom_metadata[k] = {"uvals": {}, "inv_uvals": {}}
self._atom_counts = {k: 0 for k in list(metadata.atom_metadata)}
# Set up empty nonbond holders
self._nb_parameters = {}
self._nb_scaling_factors = {}
self._nb_metadata = {}
# Any remaining metadata
self._box_size = {}
self._box_center = {}
self._mixing_rule = ''
### Generic helper close/save/list/etc functions
def call_by_string(self, *args, **kwargs):
"""
Adds the ability to call DL function by their string name.
"""
if args[0] == "NYI":
return False
try:
func = getattr(self, args[0])
except AttributeError:
raise AttributeError("DataLayer:call_by_string: does not have method %s." % args[0])
return func(*args[1:], **kwargs)
def close(self):
"""
Closes the DL object
"""
self.store.close()
def list_tables(self):
"""
Lists tables loaded into the store.
"""
return [x for x in self.store.list_tables() if not x.startswith("other_")]
def list_other_tables(self):
"""
Lists "other" tables loaded into the store.
"""
return [x.replace("other_", "") for x in self.store.list_tables() if x.startswith("other_")]
def set_mixing_rule(self, mixing_rule):
"""
Store a mixing rule in the datalayer.
Parameters:
------------------------------------
mixing_rule: str
Mixing rule to apply to calculate nonbonded parameters for pairs of atoms. Valid mixing rules are listed in
nb_converter.LJ_mixing_functions
"""
if not isinstance(mixing_rule, str):
raise TypeError("Validate mixing rule: %s is not a string" % mixing_rule)
mixing_metadata = nb_converter.LJ_mixing_functions
keys = mixing_metadata.keys()
if mixing_rule not in keys:
raise ValueError("Mixing rule type %s not found" % mixing_rule)
self._mixing_rule = mixing_rule
def get_mixing_rule(self):
""" Retrieve the stored mixing rule from the datalayer. Returns a string """
ret = copy.deepcopy(self._mixing_rule)
return ret
def set_box_center(self, box_center, utype=None):
"""
Sets the center of the box.
"""
# Get box metadata
box_metadata = metadata.box_metadata
dimensions = box_metadata["center"]
# Make sure we have all keywords that define a simulation box
for k in dimensions:
if k.lower() not in box_center and k.upper() not in box_center:
raise KeyError("Could not find key '%s'." % k)
if utype is not None:
if not isinstance(utype, dict):
raise TypeError("Validate term dict: Unit type '%s' not understood" % str(type(utype)))
# Convert to internal units
for k, v in dimensions.items():
internal = units.convert_contexts(v)
cf = units.conversion_factor(utype[k], internal)
self._box_center[k] = cf * box_center[k]
else:
for k, v in box_center.items():
self._box_center[k] = v
def get_box_center(self, utype=None):
"""
Gets the center of the box for the datalayer
"""
ret = copy.deepcopy(self._box_center)
# Get information for internal representation of box dimensions
box_metadata = metadata.box_metadata
dimensions = box_metadata["center"]
if utype is not None and ret:
if not isinstance(utype, dict):
raise TypeError("Validate term dict: Unit type '%s' not understood" % str(type(utype)))
# Convert to internal units
for k, v in dimensions.items():
internal = units.convert_contexts(v)
cf = units.conversion_factor(internal, utype[k])
ret[k] *= cf
return ret
else:
return ret
def set_nb_scaling_factors(self, nb_scaling_factors):
"""
Sets the exclusion information for the datalayer
Parameters
------
nb_scaling_factors: dict
Format is:
nb_scaling_factors = {
"coul":{
"scale12": "dimensionless",
"scale13": "dimensionless",
"scale14": "dimensionless",
},
"vdw":{
"scale12": "dimensionless",
"scale13": "dimensionless",
"scale14": "dimensionless",
}
}
"""
if not isinstance(nb_scaling_factors, dict):
raise TypeError("Exclusion information cannot be validated as dictionary '%s'"% str(type(nb_scaling_factors)))
exclusions_metadata = metadata.exclusions
# Make sure we have all keywords
if nb_scaling_factors.keys() != exclusions_metadata.keys():
raise KeyError("Not all exclusion keywords are imported")
# Make sure scaling factors make sense
for ok, ov in nb_scaling_factors.items():
for k, v in ov.items():
if v > 1.0 or v < 0.0:
raise ValueError("Exclusion value outside bounds '%s'." % v)
self._nb_scaling_factors = nb_scaling_factors
def get_nb_scaling_factors(self):
"""
Retrieves nonbonded scaling factors from datalayer.
"""
ret = copy.deepcopy(self._nb_scaling_factors)
return ret
def set_nb_pair_interaction(self):
"""
Set a special interaction between two particles
:return:
"""
return False
def set_pair_scalings(self, scaling_df):
"""
Set scaling factor for nonbond interaction between two atoms using multi-level indexing.
Parameters:
--------------------
scaling_df: DataFrame
Columns of the dataframe should be -
atom_index1: int
The atom index of the first atom
atom_index2: int
The atom index of the second atom
vdw_scale: float
coul_scale: float
Returns:
-------------------
Returns: bool
True if successful
"""
possible_columns = [y for x in metadata.additional_metadata.nb_scaling.values() for y in x]
# Check the columns of the dataframe
for col in scaling_df.columns:
if col not in possible_columns:
raise KeyError ("Column %s not recognized in set_pair_scalings." %(col))
# Check to make sure atom_index1 and atom_index2 are set in dataframe
for col in metadata.additional_metadata.nb_scaling["index"]:
if col not in scaling_df.columns:
raise KeyError("%s not found in scaling dataframe (set_pair_scalings)" %(col))
if not np.issubdtype(scaling_df[col].dtype, np.integer):
raise TypeError("%s column is type %s. Should be integer" %(col, scaling_df[col].dtype) )
# Make sure at least one scaling factor is set
if len(scaling_df.columns) < 3:
raise ValueError("No scaling factors set in set_pair_scalings")
# Check that scalings are type float
# Build multi-level indexer
index = pd.MultiIndex.from_arrays([scaling_df["atom_index1"], scaling_df["atom_index2"]])
for l in ["vdw_scale", "coul_scale"]:
if l in scaling_df.columns:
df = pd.Series(scaling_df[l].tolist(), index=index)
self.store.add_table(l, df)
return True
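# Illustrative usage sketch (hypothetical data; assumes a datalayer instance `dl`
# and pandas imported as pd). The frame needs both integer index columns plus at
# least one scale column:
#
#     scaling_df = pd.DataFrame({
#         "atom_index1": [0, 0],
#         "atom_index2": [3, 4],
#         "vdw_scale": [0.0, 0.5],
#         "coul_scale": [0.0, 0.5],
#     })
#     dl.set_pair_scalings(scaling_df)
#     dl.get_pair_scalings()  # DataFrame indexed by (atom_index1, atom_index2)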
def get_pair_scalings(self, nb_labels=["vdw_scale", "coul_scale"]):
"""
Get scaling factor for the nonbonded interaction between two atoms
Parameters
------------------------------------
nb_labels: list
Returns
------------------------------------
pd.DataFrame
"""
for k in nb_labels:
if k not in metadata.additional_metadata.nb_scaling["data"]:
raise KeyError("%s is not a valid nb_scale type" %(k))
rlist = []
rlabels = []
for label in nb_labels:
rlabels.append(label)
rlist.append(self.store.read_table(label))
ret = pd.concat(rlist, axis=1)
ret.columns = rlabels
return ret
def build_scaling_list(self):
"""
Build pair scalings based on parameters set in set_nb_scaling_factors.
"""
scaling_factors = self.get_nb_scaling_factors()
for k, v in scaling_factors.items():
for scale, val in v.items():
order = int(scale[-1])
terms = self.get_terms(order)
store_df = pd.DataFrame()
store_df["atom_index1"] = terms["atom1"]
store_df["atom_index2"] = terms["atom"+scale[-1]]
store_df[k + "_scale"] = val
if not store_df.empty and val != 1.0:
self.set_pair_scalings(store_df)
return True
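# Sketch of the mapping used above (for orientation only; get_terms() is defined
# elsewhere in the datalayer): a key such as "scale14" yields
# order = int("scale14"[-1]) == 4, the bonded terms of that order are fetched, and
# each (atom1, atom4) pair receives the k + "_scale" column, e.g. for
# {"vdw": {"scale14": 0.5}}:
#
#     atom_index1  atom_index2  vdw_scale
#     <atom1>      <atom4>      0.5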
def set_box_size(self, lattice_const, utype=None):
"""
Sets the box lattice constants for the datalayer
Parameters
----------
lattice_const: dict
Dictionary containing box dimensions
{ 'a' : [length],
'b' : [length],
'c': [length],
'alpha': [angle],
'beta': [angle],
'gamma': [angle],
}
"""
# Get box metadata
box_metadata = metadata.box_metadata
dimensions = box_metadata["dimensions"]
# Make sure we have all keywords that define a simulation box
for k in dimensions:
if k.lower() not in lattice_const and k.upper() not in lattice_const:
raise KeyError("Could not find key '%s'." % k)
if utype is not None:
if not isinstance(utype, dict):
raise TypeError("Validate term dict: Unit type '%s' not understood" % str(type(utype)))
# Convert to internal units
for k, v in dimensions.items():
internal = units.convert_contexts(v)
cf = units.conversion_factor(utype[k], internal)
self._box_size[k] = cf * lattice_const[k]
else:
for k, v in lattice_const.items():
self._box_size[k] = v
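# Illustrative sketch of the expected input (hypothetical values and unit names;
# `dl` is an assumed datalayer instance): lattice lengths and angles, optionally
# with a matching utype dict for unit conversion.
#
#     dl.set_box_size(
#         {"a": 10.0, "b": 10.0, "c": 10.0, "alpha": 90.0, "beta": 90.0, "gamma": 90.0},
#         utype={"a": "angstrom", "b": "angstrom", "c": "angstrom",
#                "alpha": "degree", "beta": "degree", "gamma": "degree"})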
def get_box_size(self, utype=None):
"""
Gets the overall size of the box for the datalayer
"""
ret = copy.deepcopy(self._box_size)
# Get information for internal representation of box dimensions
box_metadata = metadata.box_metadata
dimensions = box_metadata["dimensions"]
if utype is not None and ret:
if not isinstance(utype, dict):
raise TypeError("Validate term dict: Unit type '%s' not understood" % str(type(utype)))
# Convert to internal units
for k, v in dimensions.items():
internal = units.convert_contexts(v)
cf = units.conversion_factor(internal, utype[k])
ret[k] *= cf
return ret
else:
return ret
import os.path
from datetime import datetime
from unittest import mock
from unittest.mock import MagicMock
import chardet
import tablib
from core.admin import (
AuthorAdmin,
BookAdmin,
BookResource,
CustomBookAdmin,
ImportMixin,
)
from core.models import Author, Book, Category, EBook, Parent
from django.contrib.admin.models import DELETION, LogEntry
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpRequest
from django.test.testcases import TestCase
from django.test.utils import override_settings
from django.utils.translation import gettext_lazy as _
from tablib import Dataset
from import_export import formats
from import_export.admin import (
ExportActionMixin,
ExportActionModelAdmin,
ExportMixin,
ImportExportActionModelAdmin,
)
from import_export.formats.base_formats import DEFAULT_FORMATS
from import_export.tmp_storages import TempFolderStorage
class ImportExportAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', '<EMAIL>',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.client.login(username='admin', password='password')
def test_import_export_template(self):
response = self.client.get('/admin/core/book/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'admin/import_export/change_list_import_export.html')
self.assertContains(response, _('Import'))
self.assertContains(response, _('Export'))
@override_settings(TEMPLATE_STRING_IF_INVALID='INVALID_VARIABLE')
def test_import(self):
# GET the import form
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books.csv')
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, Book._meta.verbose_name_plural)
)
def test_delete_from_admin(self):
# test delete from admin site (see #432)
# create a book which can be deleted
b = Book.objects.create(id=1)
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-for-delete.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
# check the LogEntry was created as expected
deleted_entry = LogEntry.objects.latest('id')
self.assertEqual("delete through import_export", deleted_entry.change_message)
self.assertEqual(DELETION, deleted_entry.action_flag)
self.assertEqual(b.id, int(deleted_entry.object_id))
self.assertEqual("", deleted_entry.object_repr)
@override_settings(TEMPLATE_STRING_IF_INVALID='INVALID_VARIABLE')
def test_import_mac(self):
# GET the import form
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-mac.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books-mac.csv')
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, Book._meta.verbose_name_plural)
)
def test_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
data = {
'file_format': '0',
}
date_str = datetime.now().strftime('%Y-%m-%d')
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Book-{}.csv"'.format(date_str)
)
def test_returns_xlsx_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
for i, f in enumerate(DEFAULT_FORMATS):
if f().get_title() == 'xlsx':
xlsx_index = i
break
else:
self.fail('Unable to find xlsx format. DEFAULT_FORMATS: %r' % DEFAULT_FORMATS)
data = {'file_format': str(xlsx_index)}
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
self.assertEqual(response['Content-Type'],
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
def test_import_export_buttons_visible_without_add_permission(self):
# issue 38 - Export button not visible when no add permission
original = BookAdmin.has_add_permission
BookAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/book/')
BookAdmin.has_add_permission = original
self.assertContains(response, _('Export'))
self.assertContains(response, _('Import'))
def test_import_buttons_visible_without_add_permission(self):
# When using ImportMixin, users should be able to see the import button
# without add permission (to be consistent with ImportExportMixin)
original = AuthorAdmin.has_add_permission
AuthorAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/author/')
AuthorAdmin.has_add_permission = original
self.assertContains(response, _('Import'))
self.assertTemplateUsed(response, 'admin/import_export/change_list.html')
def test_import_file_name_in_tempdir(self):
# 65 - import_file_name form field can be use to access the filesystem
import_file_name = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
data = {
'input_format': "0",
'import_file_name': import_file_name,
'original_file_name': 'books.csv'
}
with self.assertRaises(FileNotFoundError):
self.client.post('/admin/core/book/process_import/', data)
def test_csrf(self):
response = self.client.get('/admin/core/book/process_import/')
self.assertEqual(response.status_code, 405)
def test_import_log_entry(self):
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
book = LogEntry.objects.latest('id')
self.assertEqual(book.object_repr, "Some book")
self.assertEqual(book.object_id, str(1))
def test_import_log_entry_with_fk(self):
Parent.objects.create(id=1234, name='Some Parent')
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'child.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/child/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/child/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
child = LogEntry.objects.latest('id')
self.assertEqual(child.object_repr, 'Some - child of Some Parent')
self.assertEqual(child.object_id, str(1))
def test_logentry_creation_with_import_obj_exception(self):
# from https://mail.python.org/pipermail/python-dev/2008-January/076194.html
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
# Cause an exception in import_row, but only after import is confirmed,
# so a failure only occurs when ImportMixin.process_import is called.
class R(BookResource):
def import_obj(self, obj, data, dry_run, **kwargs):
if dry_run:
super().import_obj(obj, data, dry_run, **kwargs)
else:
raise Exception
@monkeypatch_method(BookAdmin)
def get_resource_class(self):
return R
# Verify that when an exception occurs in import_row, when raise_errors is False,
# the returned row result has a correct import_type value,
# so generating log entries does not fail.
@monkeypatch_method(BookAdmin)
def process_dataset(self, dataset, confirm_form, request, *args, **kwargs):
resource = self.get_import_resource_class()(**self.get_import_resource_kwargs(request, *args, **kwargs))
return resource.import_data(dataset,
dry_run=False,
raise_errors=False,
file_name=confirm_form.cleaned_data['original_file_name'],
user=request.user,
**kwargs)
dataset = Dataset(headers=["id","name","author_email"])
dataset.append([1, "Test 1", "<EMAIL>"])
input_format = '0'
content = dataset.csv
f = SimpleUploadedFile("data.csv", content.encode(), content_type="text/csv")
data = {
"input_format": input_format,
"import_file": f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
def test_import_with_customized_forms(self):
"""Test if admin import works if forms are customized"""
# We reuse import scheme from `test_import` to import books.csv.
# We use customized BookAdmin (CustomBookAdmin) with modified import
# form, which requires Author to be selected (from available authors).
# Note that url is /admin/core/ebook/import (and not: ...book/import)!
# We need at least a single author in the db to select from in the
# admin import custom forms
Author.objects.create(id=11, name='<NAME>')
# GET the import form
response = self.client.get('/admin/core/ebook/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as fobj:
data = {'author': 11,
'input_format': input_format,
'import_file': fobj}
response = self.client.post('/admin/core/ebook/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
self.assertIsInstance(confirm_form,
CustomBookAdmin(EBook, 'ebook/import')
.get_confirm_import_form())
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books.csv')
response = self.client.post('/admin/core/ebook/process_import/',
data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, EBook._meta.verbose_name_plural)
)
def test_get_skip_admin_log_attribute(self):
m = ImportMixin()
m.skip_admin_log = True
self.assertTrue(m.get_skip_admin_log())
def test_get_tmp_storage_class_attribute(self):
"""Mock dynamically loading a class defined by an attribute"""
target = "SomeClass"
m = ImportMixin()
m.tmp_storage_class = "tmpClass"
with mock.patch("import_export.admin.import_string") as mock_import_string:
mock_import_string.return_value = target
self.assertEqual(target, m.get_tmp_storage_class())
def test_get_import_data_kwargs_with_form_kwarg(self):
"""
Test that if the method is called with a 'form' kwarg,
then it is removed and the updated dict is returned
"""
request = MagicMock(spec=HttpRequest)
m = ImportMixin()
kw = {
"a": 1,
"form": "some_form"
}
target = {
"a": 1
}
self.assertEqual(target, m.get_import_data_kwargs(request, **kw))
def test_get_import_data_kwargs_with_no_form_kwarg_returns_empty_dict(self):
"""
Test that if the method is called with no 'form' kwarg,
then an empty dict is returned
"""
request = MagicMock(spec=HttpRequest)
m = ImportMixin()
kw = {
"a": 1,
}
target = {}
self.assertEqual(target, m.get_import_data_kwargs(request, **kw))
def test_get_context_data_returns_empty_dict(self):
m = ExportMixin()
self.assertEqual(dict(), m.get_context_data())
def test_media_attribute(self):
"""
Test that the 'media' attribute of the ModelAdmin class is overridden to include
the project-specific js file.
"""
mock_model = mock.MagicMock()
mock_site = mock.MagicMock()
class TestExportActionModelAdmin(ExportActionModelAdmin):
def __init__(self):
super().__init__(mock_model, mock_site)
m = TestExportActionModelAdmin()
target_media = m.media
self.assertEqual('import_export/action_formats.js', target_media._js[-1])
class TestImportExportActionModelAdmin(ImportExportActionModelAdmin):
def __init__(self, mock_model, mock_site, error_instance):
self.error_instance = error_instance
super().__init__(mock_model, mock_site)
def write_to_tmp_storage(self, import_file, input_format):
mock_storage = MagicMock(spec=TempFolderStorage)
mock_storage.read.side_effect = self.error_instance
return mock_storage
class ImportActionDecodeErrorTest(TestCase):
mock_model = mock.Mock(spec=Book)
mock_model.__name__ = "mockModel"
mock_site = mock.MagicMock()
mock_request = MagicMock(spec=HttpRequest)
mock_request.POST = {'a': 1}
mock_request.FILES = {}
@mock.patch("import_export.admin.ImportForm")
def test_import_action_handles_UnicodeDecodeError(self, mock_form):
mock_form.is_valid.return_value = True
b_arr = b'\x00\x00'
m = TestImportExportActionModelAdmin(self.mock_model, self.mock_site,
UnicodeDecodeError('codec', b_arr, 1, 2, 'fail!'))
res = m.import_action(self.mock_request)
self.assertEqual(
"<h1>Imported file has a wrong encoding: \'codec\' codec can\'t decode byte 0x00 in position 1: fail!</h1>",
res.content.decode())
@mock.patch("import_export.admin.ImportForm")
def test_import_action_handles_error(self, mock_form):
mock_form.is_valid.return_value = True
m = TestImportExportActionModelAdmin(self.mock_model, self.mock_site,
ValueError("fail"))
res = m.import_action(self.mock_request)
self.assertRegex(
res.content.decode(),
r"<h1>ValueError encountered while trying to read file: .*</h1>")
class ExportActionAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', '<EMAIL>',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.cat1 = Category.objects.create(name='Cat | |
# -*- coding: utf-8 -*-
from __future__ import with_statement, print_function
from __future__ import absolute_import
'''
author: <NAME>
organization: I2BM, Neurospin, Gif-sur-Yvette, France
organization: CATI, France
license: `CeCILL-B <http://www.cecill.info/licences/Licence_CeCILL_B-en.html>`_
'''
# TODO:
# clean() is called way too often (for each workflow / job / file to be
# removed), and too much time is probably spent scanning obsolete items and
# files. It should instead be done after a short delay, combining several calls
# to clean().
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------
from six.moves import range
import sqlite3
import threading
import os
import shutil
import logging
import pickle
from datetime import date
from datetime import timedelta
from datetime import datetime
import socket
import itertools
import io
import traceback
import math
import glob
import ctypes
import ctypes.util
import tempfile
import json
import sys
import soma_workflow.constants as constants
from soma_workflow.client import FileTransfer, TemporaryPath, SpecialPath
from soma_workflow.errors import UnknownObjectError, DatabaseError
from soma_workflow.info import DB_VERSION, DB_PICKLE_PROTOCOL
from soma_workflow import utils
from soma_workflow.engine_types import get_EngineTemporaryPath
import six
from six.moves import StringIO
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
strtime_format = '%Y-%m-%d %H:%M:%S'
file_separator = ', '
update_interval = timedelta(0, 30, 0)
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def adapt_datetime(ts):
return ts.strftime(strtime_format)
sqlite3.register_adapter(datetime, adapt_datetime)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
'''
Job server database tables:
Users
id
login or other userId
Jobs
=> identification:
id : int
user_id : int
=> used by the job system (DrmaaWorkflowEngine, WorkflowDatabaseServer)
drmaa_id : string, None if not submitted
submitted job DRMAA identifier
expiration_date : date
status : string
job status as defined in constants.JOB_STATUS
last_status_update : date
workflow_id : int, optional
id of the workflow the job belongs to.
None if it doesn't belong to any.
stdout_file : file path
stderr_file : file path, optional
input_params_file : file path, optional
output_params_file : file path, optional
pickled_engine_job
=> used to submit the job
command : string
job command
stdin_file : file path, optional
job's standard input as a path to a file.
C{None} if the job doesn't require an input stream.
join_errout : boolean
C{True} if the standard error should be
redirected to the same file as the standard output
(stdout_file : file path)
job's standard output as a path to a file
(stderr_file : file path, optional)
job's standard error as a path to a file
working_directory : dir path, optional
path of the job working directory.
custom_submission : boolean
C{True} if it was a custom submission.
If C{True} the standard output files won't
be deleted with the job.
parallel_config_name : string, optional
if the job is made to run on several nodes:
name of the parallel configuration as defined
in configuration.PARALLEL_CONFIGURATIONS.
nodes_number : int, optional
number of nodes requested by the job to run
cpu_per_node : int, optional
number of CPU/cores needed for each node
queue : string, optional
name of the queue used to submit the job.
=> for user and administrator usage
name : string, optional
optional name of the job.
submission_date : date
execution_date : date
ending_date : date
exit_status : string, optional
exit status string as defined in constants.JOB_EXIT_STATUS
exit_value : int, optional
if the status is FINISHED_REGULARLY, it contains the operating
system exit code of the job.
terminating_signal : string, optional
if the status is FINISHED_TERM_SIG, it contains a
representation of the signal that caused the termination of the job.
resource_usage_file : string, optional
contains the resource usage information of the job.
Transfer
id
engine file path
client file path (optional)
transfer date
expiration date
user_id
workflow_id (optional)
status
client_paths
transfer_type
Input/Output junction table
job_id
engine file path (transferid)
input or output
Workflows
id,
user_id,
pickled_engine_workflow,
expiration_date,
name,
ended_transfers,
status
'''
def create_database(database_file):
if not database_file.startswith(':'):
db_dir = os.path.dirname(database_file)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
connection = sqlite3.connect(
database_file, timeout=5, isolation_level="EXCLUSIVE",
check_same_thread=False)
cursor = connection.cursor()
cursor.execute(
'''CREATE TABLE users (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
login VARCHAR(255) NOT NULL UNIQUE)''')
cursor.execute(
'''CREATE TABLE jobs (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
user_id INTEGER NOT NULL CONSTRAINT known_user REFERENCES users (id),
drmaa_id VARCHAR(255),
expiration_date DATE NOT NULL,
status VARCHAR(255) NOT NULL,
last_status_update DATE NOT NULL,
workflow_id INTEGER CONSTRAINT known_workflow REFERENCES workflows (id),
command TEXT,
stdin_file TEXT,
join_errout BOOLEAN NOT NULL,
stdout_file TEXT NOT NULL,
stderr_file TEXT,
working_directory TEXT,
custom_submission BOOLEAN NOT NULL,
parallel_config_name TEXT,
nodes_number INTEGER,
cpu_per_node INTEGER,
queue TEXT,
input_params_file TEXT,
output_params_file TEXT,
name TEXT,
submission_date DATE,
execution_date DATE,
ending_date DATE,
exit_status VARCHAR(255),
exit_value INTEGER,
terminating_signal VARCHAR(255),
resource_usage TEXT,
output_params TEXT,
pickled_engine_job TEXT
)''')
cursor.execute(
'''CREATE TABLE transfers (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
engine_file_path TEXT,
client_file_path TEXT,
transfer_date DATE,
expiration_date DATE NOT NULL,
user_id INTEGER NOT NULL CONSTRAINT known_user REFERENCES users (id),
workflow_id INTEGER CONSTRAINT known_workflow REFERENCES workflows (id),
status VARCHAR(255) NOT NULL,
client_paths TEXT,
transfer_type TEXT)''')
cursor.execute(
'''CREATE TABLE temporary_paths (
temp_path_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
engine_file_path TEXT,
expiration_date DATE NOT NULL,
user_id INTEGER NOT NULL CONSTRAINT known_user REFERENCES users (id),
workflow_id INTEGER CONSTRAINT known_workflow REFERENCES workflows (id),
status VARCHAR(255) NOT NULL)''')
cursor.execute(
'''CREATE TABLE ios (
job_id INTEGER NOT NULL CONSTRAINT known_job REFERENCES jobs(id),
engine_file_id INTEGER NOT NULL CONSTRAINT known_engine_file REFERENCES transfers (id),
is_input BOOLEAN NOT NULL,
PRIMARY KEY (job_id, engine_file_id, is_input))''')
cursor.execute(
'''CREATE TABLE ios_tmp (
job_id INTEGER NOT NULL CONSTRAINT known_job REFERENCES jobs(id),
temp_path_id INTEGER NOT NULL CONSTRAINT known_tmp_path REFERENCES temporary_paths (temp_path_id),
is_input BOOLEAN NOT NULL,
PRIMARY KEY (job_id, temp_path_id, is_input))''')
cursor.execute('''CREATE TABLE fileCounter (count INTEGER)''')
cursor.execute('INSERT INTO fileCounter (count) VALUES (?)', [0])
cursor.execute(
'''CREATE TABLE workflows (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
user_id INTEGER NOT NULL CONSTRAINT known_user REFERENCES users (id),
pickled_engine_workflow TEXT,
expiration_date DATE NOT NULL,
name TEXT,
ended_transfers TEXT,
status TEXT,
last_status_update DATE NOT NULL,
queue TEXT) ''')
cursor.execute('''CREATE TABLE db_version (
version TEXT NOT NULL,
python_version TEXT NOT NULL)''')
cursor.execute('''INSERT INTO db_version (version, python_version)
VALUES (?, ?)''', [DB_VERSION, '%d.%d.%d' % sys.version_info[:3]])
# parameters dependencies
cursor.execute(
'''CREATE TABLE param_links (
workflow_id INTEGER NOT NULL CONSTRAINT known_worflow REFERENCES workflows (id),
dest_job_id INTEGER NOT NULL CONSTRAINT known_job REFERENCES jobs (id),
dest_param TEXT,
src_job_id INTEGER NOT NULL CONSTRAINT known_job2 REFERENCES jobs (id),
src_param TEXT,
pickled_function TEXT)''')
cursor.close()
connection.commit()
connection.close()
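# Minimal usage sketch (not part of the original module; path and variable names
# are illustrative only): create a fresh database file and read back the schema
# version recorded by create_database().
#
#     db_path = '/tmp/soma_workflow_example.sqlite'
#     create_database(db_path)
#     cnx = sqlite3.connect(db_path)
#     print(cnx.execute('SELECT version, python_version FROM db_version').fetchone())
#     cnx.close()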
# -- this is a copy of the find_library in soma-base soma.utils.find_library
ctypes_find_library = ctypes.util.find_library
def find_library(name):
''' :func:`ctypes.util.find_library` is broken on linux at least: it relies
on ``ldconfig``, which only searches system paths, not user paths nor
``LD_LIBRARY_PATH``, or alternatively uses ``gcc``, which is not always
installed nor configured.
Here we are looking in ``[[DY]LD_LIBRARY_]PATH`` (depending on the system)
'''
def sorted_match(filenames):
return sorted(filenames)[-1] # probably not the best
exts = ['.so']
patterns = [ext + '.*' for ext in exts]
fname = 'lib' + name
if sys.platform.startswith('linux'):
envar = 'LD_LIBRARY_PATH'
elif sys.platform == 'darwin':
envar = 'DYLD_LIBRARY_PATH'
exts = ['.dylib']
patterns = ['.*' + ext for ext in exts]
elif sys.platform.startswith('win'):
envar = 'PATH'
exts = ['.dll', '.DLL']
patterns = ['.*' + ext for ext in exts]
else:
# other undetermined system (bsd, othe unix...?), assume ELF
envar = 'LD_LIBRARY_PATH'
paths = os.environ.get(envar)
if paths is None:
# no path: fallback to ctypes
return ctypes_find_library(name)
paths = paths.split(os.pathsep)
names = [fname + ext for ext in exts] + [name + ext for ext in exts]
patterns = [fname + pattern for pattern in patterns] \
+ [name + pattern for pattern in patterns]
found = None
for path in paths:
for tname in names:
filename = os.path.join(path, tname)
if os.path.exists(filename):
found = filename
break
for tname in patterns:
filenames = glob.glob(os.path.join(path, tname))
if len(filenames) != 0:
found = sorted_match(filenames)
break
if found is not None:
return os.path.basename(os.path.realpath(found))
# not found: fallback to ctypes
return ctypes_find_library(name)
#--
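# Usage sketch for the helper above (the result is indicative only; it depends on
# the libraries installed and on [[DY]LD_LIBRARY_]PATH):
#
#     lib_name = find_library('sqlite3')          # e.g. 'libsqlite3.so.0' on linux
#     dll = ctypes.CDLL(lib_name) if lib_name else None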
_sqlite3_max_variable_number = -1
def sqlite3_max_variable_number():
''' Get the max number of variables sqlite3 can accept in a query/insert
operation. This calls the C API using ctypes, and a temporary database,
since python sqlite3 module does not expose the sqlite3_limit() function.
Returns
-------
max_var: int
max variable number, or 0 if an error occurred.
'''
global _sqlite3_max_variable_number
if _sqlite3_max_variable_number != -1:
return _sqlite3_max_variable_number
try:
lib = find_library('sqlite3')
if lib is None and sys.platform.startswith('win'):
lib = find_library('sqlite3-0')
dll = ctypes.CDLL(lib)
if dll is not None:
t = tempfile.mkstemp(suffix='.sqlite')
os.close(t[0])
try:
db = ctypes.c_void_p(None)
dll.sqlite3_open_v2(t[1], ctypes.byref(db), 2,
ctypes.c_void_p(None))
_sqlite3_max_variable_number = dll.sqlite3_limit(db, 9, -1)
finally:
dll.sqlite3_close(db)
os.unlink(t[1])
except Exception:
| |
_booleancondition_ is not None:
self.set_booleancondition(_booleancondition_)
if _r_par_ is not None:
self.set_rpar(_r_par_)
def clone(self):
return AStatementCondition(self.clone_node(self._l_par_),
self.clone_node(self._booleancondition_),
self.clone_node(self._r_par_))
def apply(self, analysis):
analysis.case_astatementcondition(self)
def get_lpar(self):
return self._l_par_
def set_lpar(self, node):
if self._l_par_ is not None:
self._l_par_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._l_par_ = node
def get_booleancondition(self):
return self._booleancondition_
def set_booleancondition(self, node):
if self._booleancondition_ is not None:
self._booleancondition_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._booleancondition_ = node
def get_rpar(self):
return self._r_par_
def set_rpar(self, node):
if self._r_par_ is not None:
self._r_par_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._r_par_ = node
def __str__(self):
return '' + self.to_string(self._l_par_) + self.to_string(self._booleancondition_) \
+ self.to_string(self._r_par_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._l_par_ == child:
self._l_par_ = None
return
if self._booleancondition_ == child:
self._booleancondition_ = None
return
if self._r_par_ == child:
self._r_par_ = None
return
def replace_child(self, old_child, new_child):
if self._l_par_ == old_child:
self.set_lpar(new_child)
return
if self._booleancondition_ == old_child:
self.set_booleancondition(new_child)
return
if self._r_par_ == old_child:
self.set_rpar(new_child)
return
class AResidueCondition(Node):
def __init__(self, _residue_=None):
super(AResidueCondition, self).__init__()
self._residue_ = None
if _residue_ is not None:
self.set_residue(_residue_)
def clone(self):
return AResidueCondition(self.clone_node(self._residue_))
def apply(self, analysis):
analysis.case_aresiduecondition(self)
def get_residue(self):
return self._residue_
def set_residue(self, node):
if self._residue_ is not None:
self._residue_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._residue_ = node
def __str__(self):
return '' + self.to_string(self._residue_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._residue_ == child:
self._residue_ = None
return
def replace_child(self, old_child, new_child):
if self._residue_ == old_child:
self.set_residue(new_child)
return
class AExcludeCondition(Node):
def __init__(self, _excludestatement_=None):
super(AExcludeCondition, self).__init__()
self._excludestatement_ = None
if _excludestatement_ is not None:
self.set_excludestatement(_excludestatement_)
def clone(self):
return AExcludeCondition(self.clone_node(self._excludestatement_))
def apply(self, analysis):
analysis.case_aexcludecondition(self)
def get_excludestatement(self):
return self._excludestatement_
def set_excludestatement(self, node):
if self._excludestatement_ is not None:
self._excludestatement_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._excludestatement_ = node
def __str__(self):
return '' + self.to_string(self._excludestatement_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._excludestatement_ == child:
self._excludestatement_ = None
return
def replace_child(self, old_child, new_child):
if self._excludestatement_ == old_child:
self.set_excludestatement(new_child)
return
class ASelectCondition(Node):
def __init__(self, _selectstatement_=None):
super(ASelectCondition, self).__init__()
self._selectstatement_ = None
if _selectstatement_ is not None:
self.set_selectstatement(_selectstatement_)
def clone(self):
return ASelectCondition(self.clone_node(self._selectstatement_))
def apply(self, analysis):
analysis.case_aselectcondition(self)
def get_selectstatement(self):
return self._selectstatement_
def set_selectstatement(self, node):
if self._selectstatement_ is not None:
self._selectstatement_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._selectstatement_ = node
def __str__(self):
return '' + self.to_string(self._selectstatement_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._selectstatement_ == child:
self._selectstatement_ = None
return
def replace_child(self, old_child, new_child):
if self._selectstatement_ == old_child:
self.set_selectstatement(new_child)
return
class ACondition2(Node):
def __init__(self, _logicsymbol_=None, _condition_=None):
super(ACondition2, self).__init__()
self._logicsymbol_ = None
self._condition_ = None
if _logicsymbol_ is not None:
self.set_logicsymbol(_logicsymbol_)
if _condition_ is not None:
self.set_condition(_condition_)
def clone(self):
return ACondition2(self.clone_node(self._logicsymbol_), self.clone_node(self._condition_))
def apply(self, analysis):
analysis.case_acondition2(self)
def get_logicsymbol(self):
return self._logicsymbol_
def set_logicsymbol(self, node):
if self._logicsymbol_ is not None:
self._logicsymbol_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._logicsymbol_ = node
def get_condition(self):
return self._condition_
def set_condition(self, node):
if self._condition_ is not None:
self._condition_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._condition_ = node
def __str__(self):
return '' + self.to_string(self._logicsymbol_) + self.to_string(self._condition_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._logicsymbol_ == child:
self._logicsymbol_ = None
return
if self._condition_ == child:
self._condition_ = None
return
def replace_child(self, old_child, new_child):
if self._logicsymbol_ == old_child:
self.set_logicsymbol(new_child)
return
if self._condition_ == old_child:
self.set_condition(new_child)
return
class AAndLogicsymbol(Node):
def __init__(self, _and_=None):
super(AAndLogicsymbol, self).__init__()
self._and_ = None
if _and_ is not None:
self.set_and(_and_)
def clone(self):
return AAndLogicsymbol(self.clone_node(self._and_))
def apply(self, analysis):
analysis.case_aandlogicsymbol(self)
def get_and(self):
return self._and_
def set_and(self, node):
if self._and_ is not None:
self._and_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._and_ = node
def __str__(self):
return '' + self.to_string(self._and_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._and_ == child:
self._and_ = None
return
def replace_child(self, old_child, new_child):
if self._and_ == old_child:
self.set_and(new_child)
return
class AOrLogicsymbol(Node):
def __init__(self, _or_=None):
super(AOrLogicsymbol, self).__init__()
self._or_ = None
if _or_ is not None:
self.set_or(_or_)
def clone(self):
return AOrLogicsymbol(self.clone_node(self._or_))
def apply(self, analysis):
analysis.case_aorlogicsymbol(self)
def get_or(self):
return self._or_
def set_or(self, node):
if self._or_ is not None:
self._or_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._or_ = node
def __str__(self):
return '' + self.to_string(self._or_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._or_ == child:
self._or_ = None
return
def replace_child(self, old_child, new_child):
if self._or_ == old_child:
self.set_or(new_child)
return
class AResidueResidue(Node):
def __init__(self, _originalaminoacid_=None, _integer_=None, _mutatedaminoacid_=None):
super(AResidueResidue, self).__init__()
self._originalaminoacid_ = None
self._integer_ = None
self._mutatedaminoacid_ = list()
if _originalaminoacid_ is not None:
self.set_originalaminoacid(_originalaminoacid_)
if _integer_ is not None:
self.set_integer(_integer_)
if _mutatedaminoacid_ is not None:
self._mutatedaminoacid_.extend(_mutatedaminoacid_)
def clone(self):
return AResidueResidue(self.clone_node(self._originalaminoacid_),
self.clone_node(self._integer_),
self.clone_list(self._mutatedaminoacid_))
def apply(self, analysis):
analysis.case_aresidueresidue(self)
def get_originalaminoacid(self):
return self._originalaminoacid_
def set_originalaminoacid(self, node):
if self._originalaminoacid_ is not None:
self._originalaminoacid_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._originalaminoacid_ = node
def get_integer(self):
return self._integer_
def set_integer(self, node):
if self._integer_ is not None:
self._integer_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._integer_ = node
def get_mutatedaminoacid(self):
return self._mutatedaminoacid_
def set_mutatedaminoacid(self, lst):
del self._mutatedaminoacid_[0:len(self._mutatedaminoacid_)]
self._mutatedaminoacid_.extend(lst)
def __str__(self):
return '' + self.to_string(self._originalaminoacid_) + self.to_string(self._integer_) \
+ self.to_string(self._mutatedaminoacid_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._originalaminoacid_ == child:
self._originalaminoacid_ = None
return
if self._integer_ == child:
self._integer_ = None
return
if child in self._mutatedaminoacid_:
self._mutatedaminoacid_.remove(child)
return
def replace_child(self, old_child, new_child):
if self._originalaminoacid_ == old_child:
self.set_originalaminoacid(new_child)
return
if self._integer_ == old_child:
self.set_integer(new_child)
return
for i in range(len(self._mutatedaminoacid_)):
if self._mutatedaminoacid_[i] == old_child:
if new_child is not None:
self._mutatedaminoacid_[i] = new_child
old_child.set_parent(None)
return
del self._mutatedaminoacid_[i]
old_child.set_parent(None)
return
class AResiduenotResidue(Node):
def __init__(self, _not_=None, _originalaminoacid_=None, _integer_=None,
_mutatedaminoacid_=None):
super(AResiduenotResidue, self).__init__()
self._not_ = None
self._originalaminoacid_ = None
self._integer_ = None
self._mutatedaminoacid_ = list()
if _not_ is not None:
self.set_not(_not_)
if _originalaminoacid_ is not None:
self.set_originalaminoacid(_originalaminoacid_)
if _integer_ is not None:
self.set_integer(_integer_)
if _mutatedaminoacid_ is not None:
self._mutatedaminoacid_.extend(_mutatedaminoacid_)
def clone(self):
return AResiduenotResidue(self.clone_node(self._not_),
self.clone_node(self._originalaminoacid_),
self.clone_node(self._integer_),
self.clone_list(self._mutatedaminoacid_))
def apply(self, analysis):
analysis.case_aresiduenotresidue(self)
def get_not(self):
return self._not_
def set_not(self, node):
if self._not_ is not None:
self._not_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._not_ = node
def get_originalaminoacid(self):
return self._originalaminoacid_
def set_originalaminoacid(self, node):
if self._originalaminoacid_ is not None:
self._originalaminoacid_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._originalaminoacid_ = node
def get_integer(self):
return self._integer_
def set_integer(self, node):
if self._integer_ is not None:
self._integer_.set_parent(None)
if node is not None:
if node.get_parent() is not None:
node.get_parent().remove_child(node)
node.set_parent(self)
self._integer_ = node
def get_mutatedaminoacid(self):
return self._mutatedaminoacid_
def set_mutatedaminoacid(self, lst):
del self._mutatedaminoacid_[0:len(self._mutatedaminoacid_)]
self._mutatedaminoacid_.extend(lst)
def __str__(self):
return '' + self.to_string(self._not_) + self.to_string(self._originalaminoacid_) \
+ self.to_string(self._integer_) + self.to_string(self._mutatedaminoacid_)
def __repr__(self):
return "'" + self.__str__() + "'"
def remove_child(self, child):
if self._not_ == child:
self._not_ = None
return
if self._originalaminoacid_ == child:
self._originalaminoacid_ = None
return
if self._integer_ == child:
self._integer_ = None
return
if child in self._mutatedaminoacid_:
self._mutatedaminoacid_.remove(child)
return
def replace_child(self, old_child, new_child):
if self._not_ == old_child:
self.set_not(new_child)
return
if self._originalaminoacid_ == old_child:
self.set_originalaminoacid(new_child)
return
if self._integer_ == old_child:
self.set_integer(new_child)
return
for i in range(len(self._mutatedaminoacid_)):
if self._mutatedaminoacid_[i] == old_child:
if new_child is not None:
self._mutatedaminoacid_[i] = new_child
old_child.set_parent(None)
return
del self._mutatedaminoacid_[i]
old_child.set_parent(None)
return
class AResidueinvertResidue(Node):
def __init__(self, _originalaminoacid_=None, _integer_=None, _l_par_=None, _not_=None,
_mutatedaminoacid_=None, _r_par_=None):
super(AResidueinvertResidue, self).__init__()
self._originalaminoacid_ = None
self._integer_ = None
self._l_par_ = None
self._not_ = None
self._mutatedaminoacid_ = list()
self._r_par_ = None
if _originalaminoacid_ is not None:
self.set_originalaminoacid(_originalaminoacid_)
if _integer_ is not None:
self.set_integer(_integer_)
if _l_par_ is not None:
self.set_lpar(_l_par_)
if _not_ is not None:
self.set_not(_not_)
if _mutatedaminoacid_ is not None:
self._mutatedaminoacid_.extend(_mutatedaminoacid_)
if _r_par_ is not None:
self.set_rpar(_r_par_)
def clone(self):
return AResidueinvertResidue(self.clone_node(self._originalaminoacid_),
self.clone_node(self._integer_),
self.clone_node(self._l_par_),
self.clone_node(self._not_),
self.clone_list(self._mutatedaminoacid_),
self.clone_node(self._r_par_))
def apply(self, analysis):
analysis.case_aresidueinvertresidue(self)
def get_originalaminoacid(self):
return self._originalaminoacid_
def set_originalaminoacid(self, node):
if self._originalaminoacid_ is not None:
self._originalaminoacid_.set_parent(None)
if node is not | |
# SET CURRENTLY ACTIVE LAYER AS THE NEWLY CREATED LAYER
self.currently_active_fault_id = self.total_fault_count
# CREATE NEW LAYER OBJECT
new_fault = Fault()
# SOURCE NEW NODES FROM USER CLICKS
new_fault.id = self.currently_active_fault_id
new_fault.name = str('Fault')
new_fault.x_nodes = self.new_plotx
new_fault.y_nodes = self.new_ploty
# SET CURRENTLY ACTIVE LAYER NODE OBJECTS
self.current_x_nodes = new_fault.x_nodes
self.current_y_nodes = new_fault.y_nodes
# SET SOME OF THE NEW LAYERS ATTRIBUTES
new_fault.id = self.currently_active_fault_id
new_fault.name = str('Fault %s') % self.currently_active_fault_id
# CREATE LAYER LINE
new_fault.mpl_actor = self.model_frame.plot(new_fault.x_nodes, new_fault.y_nodes, color='green',
marker='o', linewidth=0.5, zorder=1, alpha=1.0)
# APPEND THE NEW FAULT TO THE FAULT TREE SIDE PANEL USING add_new_tree_nodes FUNC
# LIST OF FAULT NAMES
self.fault_tree_items.append('fault %s' % (int(self.currently_active_fault_id)))
self.fault_item = 'fault %s' % (int(self.currently_active_fault_id))
self.add_new_tree_nodes(self.fault_tree_root, self.fault_item, self.currently_active_fault_id)
self.fault_tree.SetSpacing(40)
# self.fold_panel_three.Collapse()
# self.fold_panel_three.Expand()
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(new_fault.x_nodes, new_fault.y_nodes)
# UPDATE CURRENT NODE RED DOT GRAPHIC
self.current_node.set_offsets([new_fault.x_nodes[0], new_fault.y_nodes[0]])
# APPEND NEW LAYER TO THE LAYER LIST
self.fault_list.append(new_fault)
# INCREMENT THE TOTAL LAYER COUNT
self.total_fault_count += 1
# UPDATE MODEL
self.draw()
def fault_mode_key_press(self, event):
"""KEY PRESS CALLBACKS WHEN FAULT MODE IS ACTIVATED"""
'i = INSERT NEW NODE AT MOUSE POSITION'
if event.key == 'i':
if event.inaxes is None:
return
# INSERT NEW NODE INTO XY LIST
self.xt = np.insert(self.xt, [self.index_arg + 1], event.xdata)
self.yt = np.insert(self.yt, [self.index_arg + 1], event.ydata)
# UPDATE THE FAULT LIST RECORDS
self.fault_list[self.currently_active_fault_id].x_nodes = self.xt
self.fault_list[self.currently_active_fault_id].y_nodes = self.yt
# UPDATE FAULT GRAPHICS
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_xdata(self.xt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_ydata(self.yt)
# UPDATE CURRENT FAULT OVERLAY GRAPHIC
self.currently_active_fault.set_data(self.xt, self.yt)
'd = DELETE NODE AT MOUSE POSITION'
if event.key == 'd':
if event.inaxes is None:
return
# FIND NODE CLOSEST TO CURSOR LOCATION
d = np.sqrt((self.xt - event.xdata) ** 2 + (self.yt - event.ydata) ** 2)
self.index_arg = np.argmin(d)
self.distance = d[self.index_arg]
if self.index_arg == 0 or \
self.index_arg == (len(self.fault_list[self.currently_active_fault_id].x_nodes) - 1):
# PREVENT END NODES BEING DELETED
return 0
if self.distance >= self.node_click_limit:
# CLICK WAS TOO FAR AWAY FROM A NODE TO DELETE IT
return 0
else:
# DELETE NODE BY RECREATING XY DATA WITHOUT CURRENT NODE
self.xt = [tup for i, tup in enumerate(self.xt) if i != self.index_arg] # DELETE X
self.yt = [tup for i, tup in enumerate(self.yt) if i != self.index_arg] # DELETE Y
# UPDATE THE FAULT LIST RECORDS
self.fault_list[self.currently_active_fault_id].x_nodes = self.xt
self.fault_list[self.currently_active_fault_id].y_nodes = self.yt
# UPDATE FAULT GRAPHICS
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_xdata(self.xt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_ydata(self.yt)
# UPDATE CURRENT FAULT OVERLAY GRAPHIC
self.currently_active_fault.set_data(self.xt, self.yt)
# RESET CURRENT NODE POSITION TO FIRST NODE
self.current_node.set_offsets([self.xt[0], self.yt[0]])
# UPDATE GMG
self.update_layer_data()
'< = DECREMENT WHICH FAULT IS BEING EDITED'
if event.key == ',':
if self.currently_active_fault_id <= self.total_fault_count - 1 and self.currently_active_fault_id > 0:
# INCREMENT TO NEXT FAULT
self.currently_active_fault_id -= 1
else:
# GO TO NEWEST FAULT
self.currently_active_fault_id = self.total_fault_count - 1
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.fault_list[self.currently_active_fault_id].x_nodes,
self.fault_list[self.currently_active_fault_id].y_nodes)
self.xt = self.fault_list[self.currently_active_fault_id].x_nodes
self.yt = self.fault_list[self.currently_active_fault_id].y_nodes
'> = INCREMENT WHICH FAULT IS BEING EDITED'
if event.key == '.':
if self.currently_active_fault_id < self.total_fault_count - 1:
# INCREMENT TO NEXT FAULT
self.currently_active_fault_id += 1
elif self.currently_active_fault_id == self.total_fault_count - 1:
# GO BACK TO FIRST FAULT
self.currently_active_fault_id = 0
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.fault_list[self.currently_active_fault_id].x_nodes,
self.fault_list[self.currently_active_fault_id].y_nodes)
self.xt = self.fault_list[self.currently_active_fault_id].x_nodes
self.yt = self.fault_list[self.currently_active_fault_id].y_nodes
# UPDATE GMG
self.update_layer_data()
def on_fault_activated(self, event):
"""RESPONSE WHEN A FAULT NAME IS SELECTED"""
# GET THE SELECTED FAULT INDEX NUMBER
self.currently_active_fault_id = self.fault_tree.GetPyData(event.GetItem())
if self.fault_picking_switch is False:
self.fault_picking_switch = True
# SET CHECKBOX AS CHECKED
self.fault_tree.GetSelection().Check(checked=True)
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.fault_list[self.currently_active_fault_id].x_nodes,
self.fault_list[self.currently_active_fault_id].y_nodes)
self.xt = self.fault_list[self.currently_active_fault_id].x_nodes
self.yt = self.fault_list[self.currently_active_fault_id].y_nodes
# UPDATE GRAPHICS WITH CURRENT FAULT SELECTED
self.update_layer_data()
def fault_checked(self, event):
"""TOGGLE WHETHER OR NOT A FAULT WILL BE PLOTTED IN THE MODEL FIGURE"""
i = self.fault_tree.GetPyData(event.GetItem())
if self.faults[i][0].get_visible() == True:
# HIDE FAULT
self.faults[i][0].set_visible(False)
self.currently_active_fault.set_visible(False)
else:
# SHOW FAULT
self.faults[i][0].set_visible(True)
self.currently_active_fault.set_visible(True)
# UPDATE FIGURE
self.draw()
def on_fault_tree_right_click_down(self, event):
"""WHEN A FAULT IN THE FAULT TREE MENU IS RIGHT CLICKED"""
# FIRST RUN on_fault_activated
self.on_fault_activated(event)
# CREATE POPOUT MENU WITH OPTIONS AND BIND OPTIONS TO ACTIONS
menu = wx.Menu()
item1 = menu.Append(wx.ID_ANY, "Change fault colour")
item2 = menu.Append(wx.ID_ANY, "Rename fault")
self.Bind(wx.EVT_MENU, self.change_color, item1)
self.Bind(wx.EVT_MENU, self.rename_fault, item2)
self.PopupMenu(menu)
menu.Destroy()
def rename_fault(self, event):
"""USE A POPUP MENU TO RENAME THE FAULT"""
# CREATE POP OUT MENU AND SHOW
fault_name_box = LayerNameDialog(self, -1, 'Rename fault',
self.fault_tree_items[self.currently_active_fault_id])
new = fault_name_box.ShowModal()
# WAIT FOR USER TO CLOSE POP OUT
# GET THE NEW LAYER NAME FROM POP OUT
new_name = fault_name_box.name
# SET THE TREE AND LAYER OBJECT WITH THE NEW NAME
current_tree_items = self.fault_tree.GetRootItem().GetChildren()
self.fault_tree.SetItemText(current_tree_items[self.currently_active_fault_id], str(new_name))
self.fault_tree_items[self.currently_active_fault_id] = str(new_name)
self.fault_list[self.currently_active_fault_id].name = str(new_name)
# LAYER AND MODEL ATTRIBUTE CONTROLS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def set_density(self, value):
self.layer_list[self.currently_active_layer_id].density = float(self.density_input.GetValue() * 1000.)
def set_reference_density(self, value):
self.layer_list[self.currently_active_layer_id].reference_density = \
float(self.ref_density_input.GetValue() * 1000.)
def set_background_density(self, event):
grav_box = SetBackgroundDensityDialog(self, -1, 'Set background density')
answer = grav_box.ShowModal()
self.background_density_upper = float(grav_box.background_density_upper)
for i in range(0, self.total_layer_count):
self.layer_list[i].reference_density = float(self.background_density_upper) * 1000.
# self.background_density_upper = float((grav_box.background_density_lower))
# self.background_density_upper = float((grav_box.background_density_lid))
self.absolute_densities = True
self.draw()
def set_susceptibility(self, value):
self.layer_list[self.currently_active_layer_id].susceptibility = float(self.susceptibility_input.GetValue())
def set_angle_a(self, value):
self.layer_list[self.currently_active_layer_id].angle_a = float(self.angle_a_input.GetValue())
def set_angle_b(self, value):
self.layer_list[self.currently_active_layer_id].angle_b = float(self.angle_b_input.GetValue())
def set_angle_c(self, value):
self.layer_list[self.currently_active_layer_id].angle_c = float(self.angle_c_input.GetValue())
def set_earth_field(self, value):
self.layer_list[self.currently_active_layer_id].earth_field = float(self.earth_field_input.GetValue())
def set_text_size(self, value):
"""GET NEW TEXT SIZE"""
self.textsize = float(self.text_size_input.GetValue())
# WELL DATA
# LOOP THROUGH ALL WELL NAMES
for i in range(len(self.well_data_list)):
self.well_data_list[i].text_size = self.textsize
self.well_data_list[i].mpl_actor_name.set_size(self.textsize)
# LOOP THROUGH ALL WELL HORIZON LABELS
for l in range(len(self.well_data_list[i].labels_list)):
if self.well_data_list[i].labels_list[l] is not None:
self.well_data_list[i].labels_list[l].set_size(self.textsize)
# # LOOP THROUGH OUTCROP DATA LABELS
if self.outcrop_data_count > 0:
for i in range(self.outcrop_data_count):
if self.outcrop_data_list[i] is not None:
for t in range(len(self.outcrop_data_list[i].labels)):
self.outcrop_data_list[i].labels[t].set_fontsize(self.textsize)
# REDRAW ANNOTATIONS WITH NEW TEXT SIZE
self.draw()
def set_obs_grav_rms(self, value):
"""SET THE DATA TO BE USED FOR CALCULATING THE RMS MISTFIT"""
selection = SetObsRmsDialog(self, -1, 'Set RMS Input', self.observed_gravity_list)
answer = selection.ShowModal()
for i in range(0, len(self.observed_gravity_list)):
if self.observed_gravity_list[i].name == selection.obs_name:
self.obs_gravity_data_for_rms = self.observed_gravity_list[i].data
def set_obs_mag_rms(self, value):
"""SET THE DATA TO BE USED FOR CALCULATING THE RMS MISTFIT"""
selection = SetObsRmsDialog(self, -1, 'Set RMS Input', self.observed_magnetic_list)
answer = selection.ShowModal()
for i in range(0, len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i].name == selection.obs_name:
self.obs_mag_data_for_rms = self.observed_magnetic_list[i].data
def model_rms(self, xp):
"""CALCULATE RMS MISFIT OF OBSERVED VS CALCULATED"""
if self.obs_gravity_data_for_rms != [] and self.calc_grav_switch is True:
x = xp * 0.001
y = self.predicted_gravity
self.gravity_rms_value, self.grav_residuals = model_stats.rms(self.obs_gravity_data_for_rms[:, 0],
self.obs_gravity_data_for_rms[:, 1], x, y)
else:
pass
if self.obs_mag_data_for_rms != [] and self.calc_mag_switch is True:
x = self.xp * 0.001
y = self.predicted_nt
self.magnetic_rms_value, self.mag_residuals = model_stats.rms(self.obs_mag_data_for_rms[:, 0],
self.obs_mag_data_for_rms[:, 1], x, y)
else:
pass
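# For context: model_stats.rms (external module) is assumed here to compute a
# standard root-mean-square misfit after interpolating the predicted curve onto
# the observed x positions, roughly:
#
#     residuals = obs_y - np.interp(obs_x, calc_x, calc_y)
#     rms_value = np.sqrt(np.mean(residuals ** 2))
#
# (Sketch only; the exact behaviour lives in model_stats.rms.)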
# LAYER ATTRIBUTE TABLE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def open_attribute_table(self, event):
self.attribute_table = AttributeEditor(self, -1, 'Attribute editor', self.tree_items, self.layer_list)
self.attribute_table.Show(True)
def attribute_set(self, new_tree_items, new_layer_list):
"""UPDATE GMG ATTRIBUTES WITH NEW ATTRIBUTES FROM THE ATTRIBUTE TABLE"""
# UPDATE MAIN FRAME TREE ITEMS (RENAME THE ITEMS)
current_tree_items = self.tree.GetRootItem().GetChildren()
for i in range(0, len(self.tree_items) - 1):
new_label = new_tree_items[i]
self.tree.SetItemText(current_tree_items[i], new_tree_items[i + 1])
# UPDATE MAIN FRAME ATTRIBUTES
for l in range(0, len(self.layer_list)):
self.layer_list[l].density = new_layer_list[l].density
self.layer_list[l].reference_density = new_layer_list[l].reference_density
self.layer_list[l].susceptibility = new_layer_list[l].susceptibility
self.layer_list[l].angle_a = new_layer_list[l].angle_a
self.layer_list[l].angle_b = new_layer_list[l].angle_b
self.layer_list[l].angle_c = new_layer_list[l].angle_c
self.layer_list[l].earth_field = new_layer_list[l].earth_field
self.layer_list[l].color = new_layer_list[l].color
# UPDATE LAYER ATTRIBUTE INPUTS
self.density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
# UPDATE GMG STATE
self.update_layer_data()
self.run_algorithms()
self.draw()
# LIVE GRAPHICS UPDATES~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def update_layer_data(self):
"""UPDATE PROGRAM GRAPHICS AFTER A CHANGE IS MADE - A.K.A REDRAW EVERYTHING"""
# UPDATE FRAME LIMITS
xmin, xmax = self.model_frame.get_xlim()
if self.topo_frame:
self.topo_frame.set_xlim(xmin, xmax)
self.topo_d_frame.set_xlim(xmin, xmax)
if self.gravity_frame:
self.gravity_frame.set_xlim(xmin, xmax)
self.gravity_d_frame.set_xlim(xmin, xmax)
if self.magnetic_frame:
self.magnetic_frame.set_xlim(xmin, xmax)
self.magnetic_d_frame.set_xlim(xmin, xmax)
if self.fault_picking_switch is True:
# GMG IS IN FAULT MODE
# UPDATE FAULT NODES
self.fault_list[self.currently_active_fault_id].x_nodes = self.xt
self.fault_list[self.currently_active_fault_id].y_nodes = self.yt
# UPDATE FAULT MPL ACTOR
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_xdata(self.xt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_ydata(self.yt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_color(
self.fault_list[self.currently_active_fault_id].color)
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.xt, self.yt)
self.currently_active_fault.set_color(self.fault_list[self.currently_active_fault_id].color)
else:
# GMG IS IN LAYER MODE
# UPDATE PLOT LISTS WITH LATEST EDIT
self.layer_list[self.currently_active_layer_id].x_nodes = self.current_x_nodes
self.layer_list[self.currently_active_layer_id].y_nodes = self.current_y_nodes
# CREATE UPDATED POLYGON XYs -------------------------------------------------------------------------------
# FIRST CREATE THE POLYLINE DATA (THE BOTTOM LINE OF THE LAYER POLYGON - THIS IS DONE FIRST SO THE WHOLE
# POLYGON ISN'T PASSED TO SELF.POLYPLOTS)
for i in range(0, self.total_layer_count + 1):
# CREATE THE
# groups/bsl/bslmf/bslmf.gyp
{
'variables': {
'bslmf_sources': [
'bslmf_addconst.cpp',
'bslmf_addcv.cpp',
'bslmf_addlvaluereference.cpp',
'bslmf_addpointer.cpp',
'bslmf_addreference.cpp',
'bslmf_addrvaluereference.cpp',
'bslmf_addvolatile.cpp',
'bslmf_arraytopointer.cpp',
'bslmf_assert.cpp',
'bslmf_conditional.cpp',
'bslmf_detectnestedtrait.cpp',
'bslmf_enableif.cpp',
'bslmf_forwardingtype.cpp',
'bslmf_functionpointertraits.cpp',
'bslmf_haspointersemantics.cpp',
'bslmf_if.cpp',
'bslmf_integralconstant.cpp',
'bslmf_isarithmetic.cpp',
'bslmf_isarray.cpp',
'bslmf_isbitwiseequalitycomparable.cpp',
'bslmf_isbitwisemoveable.cpp',
'bslmf_isclass.cpp',
'bslmf_isconst.cpp',
'bslmf_isconvertible.cpp',
'bslmf_isconvertibletoany.cpp',
'bslmf_isenum.cpp',
'bslmf_isfloatingpoint.cpp',
'bslmf_isfunction.cpp',
'bslmf_isfundamental.cpp',
'bslmf_isintegral.cpp',
'bslmf_islvaluereference.cpp',
'bslmf_ismemberfunctionpointer.cpp',
'bslmf_ismemberobjectpointer.cpp',
'bslmf_ismemberpointer.cpp',
'bslmf_ispair.cpp',
'bslmf_ispointer.cpp',
'bslmf_ispointertomember.cpp',
'bslmf_ispolymorphic.cpp',
'bslmf_isreference.cpp',
'bslmf_isrvaluereference.cpp',
'bslmf_issame.cpp',
'bslmf_istriviallycopyable.cpp',
'bslmf_istriviallydefaultconstructible.cpp',
'bslmf_isvoid.cpp',
'bslmf_isvolatile.cpp',
'bslmf_matchanytype.cpp',
'bslmf_matcharithmetictype.cpp',
'bslmf_memberfunctionpointertraits.cpp',
'bslmf_metaint.cpp',
'bslmf_nestedtraitdeclaration.cpp',
'bslmf_nil.cpp',
'bslmf_removeconst.cpp',
'bslmf_removecv.cpp',
'bslmf_removecvq.cpp',
'bslmf_removepointer.cpp',
'bslmf_removereference.cpp',
'bslmf_removevolatile.cpp',
'bslmf_selecttrait.cpp',
'bslmf_switch.cpp',
'bslmf_tag.cpp',
'bslmf_typelist.cpp',
],
'bslmf_tests': [
'bslmf_addconst.t',
'bslmf_addcv.t',
'bslmf_addlvaluereference.t',
'bslmf_addpointer.t',
'bslmf_addreference.t',
'bslmf_addrvaluereference.t',
'bslmf_addvolatile.t',
'bslmf_arraytopointer.t',
'bslmf_assert.t',
'bslmf_conditional.t',
'bslmf_detectnestedtrait.t',
'bslmf_enableif.t',
'bslmf_forwardingtype.t',
'bslmf_functionpointertraits.t',
'bslmf_haspointersemantics.t',
'bslmf_if.t',
'bslmf_integralconstant.t',
'bslmf_isarithmetic.t',
'bslmf_isarray.t',
'bslmf_isbitwiseequalitycomparable.t',
'bslmf_isbitwisemoveable.t',
'bslmf_isclass.t',
'bslmf_isconst.t',
'bslmf_isconvertible.t',
'bslmf_isconvertibletoany.t',
'bslmf_isenum.t',
'bslmf_isfloatingpoint.t',
'bslmf_isfunction.t',
'bslmf_isfundamental.t',
'bslmf_isintegral.t',
'bslmf_islvaluereference.t',
'bslmf_ismemberfunctionpointer.t',
'bslmf_ismemberobjectpointer.t',
'bslmf_ismemberpointer.t',
'bslmf_ispair.t',
'bslmf_ispointer.t',
'bslmf_ispointertomember.t',
'bslmf_ispolymorphic.t',
'bslmf_isreference.t',
'bslmf_isrvaluereference.t',
'bslmf_issame.t',
'bslmf_istriviallycopyable.t',
'bslmf_istriviallydefaultconstructible.t',
'bslmf_isvoid.t',
'bslmf_isvolatile.t',
'bslmf_matchanytype.t',
'bslmf_matcharithmetictype.t',
'bslmf_memberfunctionpointertraits.t',
'bslmf_metaint.t',
'bslmf_nestedtraitdeclaration.t',
'bslmf_nil.t',
'bslmf_removeconst.t',
'bslmf_removecv.t',
'bslmf_removecvq.t',
'bslmf_removepointer.t',
'bslmf_removereference.t',
'bslmf_removevolatile.t',
'bslmf_selecttrait.t',
'bslmf_switch.t',
'bslmf_tag.t',
'bslmf_typelist.t',
],
'bslmf_tests_paths': [
'<(PRODUCT_DIR)/bslmf_addconst.t',
'<(PRODUCT_DIR)/bslmf_addcv.t',
'<(PRODUCT_DIR)/bslmf_addlvaluereference.t',
'<(PRODUCT_DIR)/bslmf_addpointer.t',
'<(PRODUCT_DIR)/bslmf_addreference.t',
'<(PRODUCT_DIR)/bslmf_addrvaluereference.t',
'<(PRODUCT_DIR)/bslmf_addvolatile.t',
'<(PRODUCT_DIR)/bslmf_arraytopointer.t',
'<(PRODUCT_DIR)/bslmf_assert.t',
'<(PRODUCT_DIR)/bslmf_conditional.t',
'<(PRODUCT_DIR)/bslmf_detectnestedtrait.t',
'<(PRODUCT_DIR)/bslmf_enableif.t',
'<(PRODUCT_DIR)/bslmf_forwardingtype.t',
'<(PRODUCT_DIR)/bslmf_functionpointertraits.t',
'<(PRODUCT_DIR)/bslmf_haspointersemantics.t',
'<(PRODUCT_DIR)/bslmf_if.t',
'<(PRODUCT_DIR)/bslmf_integralconstant.t',
'<(PRODUCT_DIR)/bslmf_isarithmetic.t',
'<(PRODUCT_DIR)/bslmf_isarray.t',
'<(PRODUCT_DIR)/bslmf_isbitwiseequalitycomparable.t',
'<(PRODUCT_DIR)/bslmf_isbitwisemoveable.t',
'<(PRODUCT_DIR)/bslmf_isclass.t',
'<(PRODUCT_DIR)/bslmf_isconst.t',
'<(PRODUCT_DIR)/bslmf_isconvertible.t',
'<(PRODUCT_DIR)/bslmf_isconvertibletoany.t',
'<(PRODUCT_DIR)/bslmf_isenum.t',
'<(PRODUCT_DIR)/bslmf_isfloatingpoint.t',
'<(PRODUCT_DIR)/bslmf_isfunction.t',
'<(PRODUCT_DIR)/bslmf_isfundamental.t',
'<(PRODUCT_DIR)/bslmf_isintegral.t',
'<(PRODUCT_DIR)/bslmf_islvaluereference.t',
'<(PRODUCT_DIR)/bslmf_ismemberfunctionpointer.t',
'<(PRODUCT_DIR)/bslmf_ismemberobjectpointer.t',
'<(PRODUCT_DIR)/bslmf_ismemberpointer.t',
'<(PRODUCT_DIR)/bslmf_ispair.t',
'<(PRODUCT_DIR)/bslmf_ispointer.t',
'<(PRODUCT_DIR)/bslmf_ispointertomember.t',
'<(PRODUCT_DIR)/bslmf_ispolymorphic.t',
'<(PRODUCT_DIR)/bslmf_isreference.t',
'<(PRODUCT_DIR)/bslmf_isrvaluereference.t',
'<(PRODUCT_DIR)/bslmf_issame.t',
'<(PRODUCT_DIR)/bslmf_istriviallycopyable.t',
'<(PRODUCT_DIR)/bslmf_istriviallydefaultconstructible.t',
'<(PRODUCT_DIR)/bslmf_isvoid.t',
'<(PRODUCT_DIR)/bslmf_isvolatile.t',
'<(PRODUCT_DIR)/bslmf_matchanytype.t',
'<(PRODUCT_DIR)/bslmf_matcharithmetictype.t',
'<(PRODUCT_DIR)/bslmf_memberfunctionpointertraits.t',
'<(PRODUCT_DIR)/bslmf_metaint.t',
'<(PRODUCT_DIR)/bslmf_nestedtraitdeclaration.t',
'<(PRODUCT_DIR)/bslmf_nil.t',
'<(PRODUCT_DIR)/bslmf_removeconst.t',
'<(PRODUCT_DIR)/bslmf_removecv.t',
'<(PRODUCT_DIR)/bslmf_removecvq.t',
'<(PRODUCT_DIR)/bslmf_removepointer.t',
'<(PRODUCT_DIR)/bslmf_removereference.t',
'<(PRODUCT_DIR)/bslmf_removevolatile.t',
'<(PRODUCT_DIR)/bslmf_selecttrait.t',
'<(PRODUCT_DIR)/bslmf_switch.t',
'<(PRODUCT_DIR)/bslmf_tag.t',
'<(PRODUCT_DIR)/bslmf_typelist.t',
],
'bslmf_pkgdeps': [
'../bsls/bsls.gyp:bsls',
'../bslscm/bslscm.gyp:bslscm',
],
},
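# Targets: 'bslmf_sources' exposes the source list to dependents; 'bslmf' builds the
# package library; 'bslmf_tests_build' builds every test driver; 'bslmf_tests_run'
# executes them via the run_unit_tests rule; the remaining targets define one
# executable per component test driver.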
'targets': [
{
'target_name': 'bslmf_sources',
'type': 'none',
'direct_dependent_settings': {
'sources': [ '<@(bslmf_sources)' ],
'include_dirs': [ '.' ],
},
},
{
'target_name': 'bslmf_tests_build',
'type': 'none',
'dependencies': [ '<@(bslmf_tests)' ],
},
{
'target_name': 'bslmf_tests_run',
'type': 'none',
'dependencies': [ 'bslmf_tests_build' ],
'sources': [ '<@(bslmf_tests_paths)' ],
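# The rule below runs tools/run_unit_tests.py once per built '.t' test driver and
# writes a '<name>.t.ran' stamp into INTERMEDIATE_DIR, so with incremental builds a
# test should only re-run when its driver changes.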
'rules': [
{
'rule_name': 'run_unit_tests',
'extension': 't',
'inputs': [ '<@(bslmf_tests_paths)' ],
'outputs': [ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).t.ran' ],
'action': [ '<(python_path)', '<(DEPTH)/tools/run_unit_tests.py',
'<(RULE_INPUT_PATH)',
'<@(_outputs)',
'--abi=<(ABI_bits)',
'--lib=<(library)'
],
'msvs_cygwin_shell': 0,
},
],
},
{
'target_name': 'bslmf',
'type': '<(library)',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)',
'bslmf_sources', ],
'export_dependent_settings': [ '<@(bslmf_pkgdeps)' ],
'direct_dependent_settings': { 'include_dirs': [ '.' ] },
# Mac OS X empty LD_DYLIB_INSTALL_NAME causes executable and shared
# libraries linking against dylib to store same path for use at runtime
'xcode_settings': { 'LD_DYLIB_INSTALL_NAME': '' },
},
{
'target_name': 'bslmf_addconst.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addconst.t.cpp' ],
},
{
'target_name': 'bslmf_addcv.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addcv.t.cpp' ],
},
{
'target_name': 'bslmf_addlvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addlvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_addpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addpointer.t.cpp' ],
},
{
'target_name': 'bslmf_addreference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addreference.t.cpp' ],
},
{
'target_name': 'bslmf_addrvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addrvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_addvolatile.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addvolatile.t.cpp' ],
},
{
'target_name': 'bslmf_arraytopointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_arraytopointer.t.cpp' ],
},
{
'target_name': 'bslmf_assert.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_assert.t.cpp' ],
},
{
'target_name': 'bslmf_conditional.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_conditional.t.cpp' ],
},
{
'target_name': 'bslmf_detectnestedtrait.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_detectnestedtrait.t.cpp' ],
},
{
'target_name': 'bslmf_enableif.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_enableif.t.cpp' ],
},
{
'target_name': 'bslmf_forwardingtype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_forwardingtype.t.cpp' ],
},
{
'target_name': 'bslmf_functionpointertraits.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_functionpointertraits.t.cpp' ],
},
{
'target_name': 'bslmf_haspointersemantics.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_haspointersemantics.t.cpp' ],
},
{
'target_name': 'bslmf_if.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_if.t.cpp' ],
},
{
'target_name': 'bslmf_integralconstant.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_integralconstant.t.cpp' ],
},
{
'target_name': 'bslmf_isarithmetic.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isarithmetic.t.cpp' ],
},
{
'target_name': 'bslmf_isarray.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isarray.t.cpp' ],
},
{
'target_name': 'bslmf_isbitwiseequalitycomparable.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isbitwiseequalitycomparable.t.cpp' ],
},
{
'target_name': 'bslmf_isbitwisemoveable.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isbitwisemoveable.t.cpp' ],
},
{
'target_name': 'bslmf_isclass.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isclass.t.cpp' ],
},
{
'target_name': 'bslmf_isconst.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isconst.t.cpp' ],
},
{
'target_name': 'bslmf_isconvertible.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isconvertible.t.cpp' ],
},
{
'target_name': 'bslmf_isconvertibletoany.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isconvertibletoany.t.cpp' ],
},
{
'target_name': 'bslmf_isenum.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isenum.t.cpp' ],
},
{
'target_name': 'bslmf_isfloatingpoint.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isfloatingpoint.t.cpp' ],
},
{
'target_name': 'bslmf_isfunction.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isfunction.t.cpp' ],
},
{
'target_name': 'bslmf_isfundamental.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isfundamental.t.cpp' ],
},
{
'target_name': 'bslmf_isintegral.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isintegral.t.cpp' ],
},
{
'target_name': 'bslmf_islvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_islvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_ismemberfunctionpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ismemberfunctionpointer.t.cpp' ],
},
{
'target_name': 'bslmf_ismemberobjectpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ismemberobjectpointer.t.cpp' ],
},
{
'target_name': 'bslmf_ismemberpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ismemberpointer.t.cpp' ],
},
{
'target_name': 'bslmf_ispair.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispair.t.cpp' ],
},
{
'target_name': 'bslmf_ispointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispointer.t.cpp' ],
},
{
'target_name': 'bslmf_ispointertomember.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispointertomember.t.cpp' ],
},
{
'target_name': 'bslmf_ispolymorphic.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispolymorphic.t.cpp' ],
},
{
'target_name': 'bslmf_isreference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isreference.t.cpp' ],
},
{
'target_name': 'bslmf_isrvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isrvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_issame.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_issame.t.cpp' ],
},
{
'target_name': 'bslmf_istriviallycopyable.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_istriviallycopyable.t.cpp' ],
},
{
'target_name': 'bslmf_istriviallydefaultconstructible.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_istriviallydefaultconstructible.t.cpp' ],
},
{
'target_name': 'bslmf_isvoid.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isvoid.t.cpp' ],
},
{
'target_name': 'bslmf_isvolatile.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isvolatile.t.cpp' ],
},
{
'target_name': 'bslmf_matchanytype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_matchanytype.t.cpp' ],
},
{
'target_name': 'bslmf_matcharithmetictype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_matcharithmetictype.t.cpp' ],
},
{
'target_name': 'bslmf_memberfunctionpointertraits.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_memberfunctionpointertraits.t.cpp' ],
# -*- coding: utf-8 -*-
"""Functions for generating training data and training networks."""
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import logging
import random
# for debugging
logging.basicConfig(level=logging.DEBUG)
import matplotlib as mpl
# Use the 'Agg' backend to allow the generation of plots even if no X server
# is available. The matplotlib backend must be set before importing pyplot.
mpl.use('Agg') # noqa
import matplotlib.pyplot as plt
import numpy as np
import six
from six.moves import range as xrange
import tensorflow as tf
from tqdm import tqdm
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import (
Callback,
EarlyStopping,
ModelCheckpoint,
TensorBoard,
)
from .config import CONFIG
from .network import compile_network, load_model, make_parallel
from .util import (
get_color_shader,
get_function,
pad_dims,
Roundrobin,
WrappedViewer,
write_keras_history_to_csv,
)
from .volumes import (
ClipSubvolumeImageGenerator,
ContrastAugmentGenerator,
ErodedMaskGenerator,
GaussianNoiseAugmentGenerator,
MaskedArtifactAugmentGenerator,
MirrorAugmentGenerator,
MissingDataAugmentGenerator,
partition_volumes,
PermuteAxesAugmentGenerator,
RelabelSeedComponentGenerator,
)
from .regions import (
Region,
)
def plot_history(history):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(history.history['loss'])
ax.plot(history.history['val_loss'])
ax.plot(history.history['val_subv_metric'])
fig.suptitle('model loss')
ax.set_ylabel('loss')
ax.set_xlabel('epoch')
ax.legend(['train', 'validation', 'val subvolumes'], loc='upper right')
return fig
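# Typical use (assuming a Keras History object returned by model.fit):
#   fig = plot_history(history)
#   fig.savefig('loss_history.png')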
def patch_prediction_copy(model):
"""Patch a Keras model to copy outputs to a kludge during training.
This is necessary for mask updates to a region during training.
Parameters
----------
model : keras.engine.Model
"""
model.train_function = None
model.test_function = None
model._orig_train_on_batch = model.train_on_batch
def train_on_batch(self, x, y, **kwargs):
kludge = x.pop('kludge', None)
outputs = self._orig_train_on_batch(x, y, **kwargs)
kludge['outputs'] = outputs.pop()
if len(outputs) == 1:
return outputs[0]
return outputs
model.train_on_batch = six.create_bound_method(train_on_batch, model)
model._orig_test_on_batch = model.test_on_batch
def test_on_batch(self, x, y, **kwargs):
kludge = x.pop('kludge', None)
outputs = self._orig_test_on_batch(x, y, **kwargs)
kludge['outputs'] = outputs.pop()
if len(outputs) == 1:
return outputs[0]
return outputs
model.test_on_batch = six.create_bound_method(test_on_batch, model)
# Below is copied and modified from Keras Model._make_train_function.
# The only change is the addition of `self.outputs` to the train function.
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
if self.train_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
training_updates = self.optimizer.get_updates(
params=self._collected_trainable_weights,
loss=self.total_loss)
updates = self.updates + training_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors + self.outputs,
updates=updates,
name='train_function',
**self._function_kwargs)
model._make_train_function = six.create_bound_method(_make_train_function, model)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors + self.outputs,
updates=self.state_updates,
name='test_function',
**self._function_kwargs)
model._make_test_function = six.create_bound_method(_make_test_function, model)
class GeneratorReset(Callback):
"""Keras epoch end callback to reset prediction copy kludges.
"""
def __init__(self, gens):
self.gens = gens
def on_epoch_end(self, epoch, logs=None):
for gen in self.gens:
gen.reset()
class GeneratorSubvolumeMetric(Callback):
"""Add a data generator's subvolume metric to Keras' metric logs.
Parameters
----------
gens : iterable of diluvian.training.MovingTrainingGenerator
metric_name : string
"""
def __init__(self, gens, metric_name):
self.gens = gens
self.metric_name = metric_name
def on_epoch_end(self, epoch, logs=None):
if self.metric_name not in self.params['metrics']:
self.params['metrics'].append(self.metric_name)
if logs:
metric = np.mean([np.mean(gen.get_epoch_metric()) for gen in self.gens])
logs[self.metric_name] = metric
class EarlyAbortException(Exception):
pass
class EarlyAbort(Callback):
"""Keras epoch end callback that aborts if a metric is above a threshold.
This is useful when convergence is sensitive to initial conditions and
models are obviously not useful to continue training after only a few
epochs. Unlike the early stopping callback, this is considered an
abnormal termination and throws an exception so that behaviors like
restarting with a new random seed are possible.
"""
def __init__(self, monitor='val_loss', threshold_epoch=None, threshold_value=None):
if threshold_epoch is None or threshold_value is None:
raise ValueError('Epoch and value to enforce threshold must be provided.')
self.monitor = monitor
self.threshold_epoch = threshold_epoch - 1
self.threshold_value = threshold_value
def on_epoch_end(self, epoch, logs=None):
if epoch == self.threshold_epoch:
current = logs.get(self.monitor)
if current >= self.threshold_value:
raise EarlyAbortException('Aborted after epoch {} because {} was {} >= {}'.format(
self.threshold_epoch, self.monitor, current, self.threshold_value))
def preprocess_subvolume_generator(subvolume_generator):
"""Apply non-augmentation preprocessing to a subvolume generator.
Parameters
----------
subvolume_generator : diluvian.volumes.SubvolumeGenerator
Returns
-------
diluvian.volumes.SubvolumeGenerator
"""
gen = subvolume_generator
if np.any(CONFIG.training.label_erosion):
gen = ErodedMaskGenerator(gen, CONFIG.training.label_erosion)
if CONFIG.training.relabel_seed_component:
gen = RelabelSeedComponentGenerator(gen)
return gen
def augment_subvolume_generator(subvolume_generator):
"""Apply data augmentations to a subvolume generator.
Parameters
----------
subvolume_generator : diluvian.volumes.SubvolumeGenerator
Returns
-------
diluvian.volumes.SubvolumeGenerator
"""
gen = subvolume_generator
for axes in CONFIG.training.augment_permute_axes:
gen = PermuteAxesAugmentGenerator(gen, CONFIG.training.augment_use_both, axes)
for axis in CONFIG.training.augment_mirrors:
gen = MirrorAugmentGenerator(gen, CONFIG.training.augment_use_both, axis)
for v in CONFIG.training.augment_noise:
gen = GaussianNoiseAugmentGenerator(gen, CONFIG.training.augment_use_both, v['axis'], v['mul'], v['add'])
for v in CONFIG.training.augment_artifacts:
if 'cache' not in v:
v['cache'] = {}
gen = MaskedArtifactAugmentGenerator(gen, CONFIG.training.augment_use_both,
v['axis'], v['prob'], v['volume_file'], v['cache'])
for v in CONFIG.training.augment_missing_data:
gen = MissingDataAugmentGenerator(gen, CONFIG.training.augment_use_both, v['axis'], v['prob'])
for v in CONFIG.training.augment_contrast:
gen = ContrastAugmentGenerator(gen, CONFIG.training.augment_use_both, v['axis'], v['prob'],
v['scaling_mean'], v['scaling_std'],
v['center_mean'], v['center_std'])
gen = ClipSubvolumeImageGenerator(gen)
return gen
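# NOTE: each augmentation wraps the previous generator, so subvolumes flow through
# the wrappers inside-out and ClipSubvolumeImageGenerator (the outermost wrapper) is
# applied last.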
class MovingTrainingGenerator(six.Iterator):
"""Generate Keras moving FOV training tuples from a subvolume generator.
This generator expects a subvolume generator that will provide subvolumes
larger than the network FOV, and will allow the output of training at one
batch to generate moves within these subvolumes to produce training data
for the subsequent batch.
Parameters
----------
subvolumes : generator of Subvolume
batch_size : int
kludge : dict
A kludge object to allow this generator to provide inputs and receive
outputs from the network.
See ``diluvian.training.patch_prediction_copy``.
f_a_bins : sequence of float, optional
Bin boundaries for filling fractions. If provided, sample loss will be
weighted to increase loss contribution from less-frequent f_a bins.
Otherwise all samples are weighted equally.
reset_generators : bool
Whether to reset subvolume generators when this generator is reset.
If true, subvolumes will be sampled in the same order each epoch.
subv_per_epoch : int, optional
If specified, the generator will only return moves from this many
subvolumes before being reset. Once this number of subvolumes is
exceeded, the generator will yield garbage batches (this is
necessary because Keras currently uses a fixed number of batches
per epoch). If specified, once each subvolume is complete its
total loss will be calculated.
subv_metric_fn : function, optional
Metric function to run on subvolumes when `subv_per_epoch` is set.
subv_metric_threshold : bool, optional
Whether to threshold subvolume masks for metrics.
subv_metric_args : dict, optional
Keyword arguments that will be passed to the subvolume metric.
"""
def __init__(self, subvolumes, batch_size, kludge,
f_a_bins=None, reset_generators=True, subv_per_epoch=None,
subv_metric_fn=None, subv_metric_threshold=False, subv_metric_args=None):
self.subvolumes = subvolumes
self.batch_size = batch_size
self.kludge = kludge
self.reset_generators = reset_generators
self.subv_per_epoch = subv_per_epoch
self.subv_metric_fn = subv_metric_fn
self.subv_metric_threshold = subv_metric_threshold
self.subv_metric_args = subv_metric_args
if self.subv_metric_args is None:
self.subv_metric_args = {}
self.regions = [None] * batch_size
self.region_pos = [None] * batch_size
self.move_counts = [0] * batch_size
self.epoch_move_counts = []
self.epoch_subv_metrics = []
self.epoch_subvolumes = 0
self.batch_image_input = [None] * batch_size
self.f_a_bins = f_a_bins
self.f_a_init = False
if f_a_bins is not None:
self.f_a_init = True
self.f_a_counts = np.ones_like(f_a_bins, dtype=np.int64)
self.f_as = np.zeros(batch_size)
self.fake_block = None
self.fake_mask = [False] * batch_size
def __iter__(self):
return self
def reset(self):
self.f_a_init = False
if self.reset_generators:
self.subvolumes.reset()
self.regions = [None] * self.batch_size
self.kludge['inputs'] = None
self.kludge['outputs'] = None
if len(self.epoch_move_counts):
logging.info(' Average moves (%s): %s',
self.subvolumes.name,
sum(self.epoch_move_counts)/float(len(self.epoch_move_counts)))
self.epoch_move_counts = []
self.epoch_subvolumes = 0
self.epoch_subv_metrics = []
self.fake_mask = [False] * self.batch_size
def get_epoch_metric(self):
assert len(self.epoch_subv_metrics) == self.subv_per_epoch, \
'Not all validation subvs completed: {}/{} (Finished moves: {}, ongoing: {})'.format(
len(self.epoch_subv_metrics), self.subv_per_epoch, self.epoch_move_counts, self.move_counts)
return self.epoch_subv_metrics
def __next__(self):
# If in the fixed-subvolumes-per-epoch mode and completed, yield fake
# data quickly.
if all(self.fake_mask):
inputs = collections.OrderedDict({
'image_input': np.repeat(pad_dims(self.fake_block['image']),
CONFIG.training.num_gpus, axis=0),
'mask_input': np.repeat(pad_dims(self.fake_block['mask']),
CONFIG.training.num_gpus, axis=0)
})
inputs['kludge'] = self.kludge
outputs = np.repeat(pad_dims(self.fake_block['target']), CONFIG.training.num_gpus, axis=0)
return (inputs, outputs)
# Before clearing last batches, reuse them to predict mask outputs
# for move training. Add mask outputs to regions.
active_regions = [n for n, region in enumerate(self.regions) if region is not None]
if active_regions and self.kludge['outputs'] is not None and self.kludge['inputs'] is not None:
for n in active_regions:
assert np.array_equal(self.kludge['inputs'][n, :],
self.batch_image_input[n, 0, 0, :, 0])
self.regions[n].add_mask(self.kludge['outputs'][n, :, :, :, 0], self.region_pos[n])
self.batch_image_input = [None] * self.batch_size
batch_mask_input = [None] * self.batch_size
batch_mask_target = [None] * self.batch_size
for r, region in enumerate(self.regions):
block_data = region.get_next_block()
#! /usr/bin/env python
# repo: dib-lab/2018-snakemake-eel-pond
"""
Execution script for snakemake elvers.
"""
# ref: https://github.com/ctb/2018-snakemake-cli/blob/master/run
import argparse
import os
import sys
import pprint
import yaml
import glob
import collections
import snakemake
import shutil
import subprocess
from snakemake.utils import validate
from .utils.utils import *
from .utils.pretty_config import pretty_name, write_config
from .utils.print_workflow_options import print_available_workflows_and_tools
from .utils.capture_stdout import CaptureStdout
from .utils.generate_yaml_schema import *
from . import _program
def build_default_params(workdir, targets):
defaultParams = {}
# first, figure out which parts of the pipeline are being run, and get those defaults
pipeline_defaultsFile = find_input_file(os.path.join('utils', 'pipeline_defaults'), 'pipeline_defaults', add_paths = [workdir])
pipeline_defaults = read_yaml(pipeline_defaultsFile)
# grab general defaults
defaultParams['basename'] = pipeline_defaults['basename']
# add main directories, outdirs available to all workflows
defaultParams['elvers_directories'] = pipeline_defaults['elvers_directories']
# grab all available workflows, and subset by user input
ep_workflows = pipeline_defaults['elvers_workflows']
workflows_to_run = {k: ep_workflows[k] for k in ep_workflows.keys() & targets}
defaultParams['elvers_workflows'] = workflows_to_run
# find targs the user entered that are not in our default info.
extra_targs = [t for t in targets if t not in ep_workflows]
for e in extra_targs: # assume extra targets are single rules, and add to the workflow
workflows_to_run[e] = [e]
# For each rule in the desired workflow, save rulename and grab rule params
required_rules = []
for rule_list in workflows_to_run.values():
required_rules += rule_list
ruleParamsFiles = []
includeRules = []
reference_extensions = []
rules_dir = defaultParams['elvers_directories']['rules']
required_rules = set(required_rules)
for rule_name in required_rules:
try:
rule = glob.glob(os.path.join(workdir, rules_dir, '*', rule_name + '.rule'))[0]
defaultParams[rule_name] = get_params(rule_name, os.path.dirname(rule))
ref_exts = defaultParams[rule_name]['elvers_params']['outputs']['extensions'].get('reference_extensions', [])
reference_extensions+=ref_exts
includeRules += [rule]
except: # if allows user workflow input, can't do this here (check extra targs later?)
sys.stderr.write(f"\n\tError: Can't add rules for extra target {rule_name}. Please fix.\n\n")
sys.exit(-1)
defaultParams['include_rules'] = list(set(includeRules))
defaultParams['reference_extensions'] = list(set(reference_extensions))
return defaultParams
def build_dirs(ep_dir, params):
''' function to build full paths for all directories '''
# build main elvers dir info
params['elvers_directories']['base_dir'] = ep_dir
params['elvers_directories']['rules'] = os.path.join(ep_dir, params['elvers_directories']['rules'])
params['elvers_directories']['animals'] = os.path.join(ep_dir, params['elvers_directories']['animals'])
# if desired, user can also provide out_path, and all dirs will be built under there
out_path = params.get('out_path', os.getcwd())
out_path = os.path.expanduser(out_path) # expand any `~` on unix
if os.path.isabs(out_path): # if user inputs an absolute path, check that it exists!
assert os.path.exists(out_path) and os.path.isdir(out_path), f"Error: provided output path {out_path} is not an existing directory. Please fix.\n\n"
else: # if not absolute, assume subdirectory of base elvers dir
out_path = os.path.join(ep_dir, out_path)
# allow user to define basename, and experiment, build outdir name
basename = params['basename']
expt = params.get('experiment', '')
if expt and not expt.startswith('_'):
expt = '_' + expt
outdir = basename + "_out" + expt
# Now join out_path, outdir name
out_path = os.path.realpath(out_path)
outdir = os.path.join(out_path, outdir)
# when using out_path, need to manually build the outdir (snakemake will not automatically create it)
if not os.path.exists(outdir):
os.makedirs(outdir)
# add full path info to the config
params['elvers_directories']['out_dir'] = outdir # outdir NAME
params['elvers_directories']['logs'] = join(outdir, params['elvers_directories']['logs'])
# build dirs for main elvers output directories
outDirs = params['elvers_directories']['outdirs']
for targ, outD in outDirs.items():
outDirs[targ] = os.path.join(outdir, outD)
# put joined paths back in params file
params['elvers_directories']['outdirs'] = outDirs
# build dirs for included rules
included_rules = params['include_rules']
for rule in included_rules:
prog = os.path.basename(rule).split('.rule')[0]
# if no outdir, just use program name
prog_dir = params[prog]['elvers_params']['outputs'].get('outdir', prog)
params[prog]['elvers_params']['outputs']['outdir'] = os.path.join(outdir, prog_dir)
return params
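# Hypothetical example: basename 'nema' with experiment 'test' yields an output
# directory '<out_path>/nema_out_test'; per-rule outdirs are then nested inside it.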
def main():
parser = argparse.ArgumentParser(prog = _program, description='run snakemake elvers', usage='''elvers <configfile.yaml> [<target> ...]
Run elvers snakemake workflows, using the given configfile.
Available Workflows:
default - run full eel_pond workflow
preprocess - preprocess reads
assemble - transcriptome assembly
annotate - annotate transcriptome assembly
quantify - read quantification
diffexp - conduct differential expression
For a quickstart, run this:
elvers examples/nema.yaml
from the main elvers directory.
To build an editable configfile to start work on your own data, run:
elvers my_config --build_config
''')
parser.add_argument('configfile')
parser.add_argument('targets', nargs='*', default=['default'])
parser.add_argument('-t', '--threads', type=int, default=1)
parser.add_argument('--extra_config', action='append', default = None)
parser.add_argument('--config_dict', type=yaml.safe_load, default = None)
parser.add_argument('--out_path', type=str, default = None)
parser.add_argument('-n', '--dry-run', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-w', '--print_workflows', action='store_true', help='just show available workflows')
parser.add_argument('-r', '--print_rules', action='store_true', help='just show available rules')
parser.add_argument('-p', '--print_params', action='store_true', help='print parameters for chosen workflows or rules')
parser.add_argument('--build_config', action='store_true', help='just build the default parameter file')
parser.add_argument('--cluster_config', default=None)
parser.add_argument('--cluster_cmd', default=None)
# advanced args below (maybe separate so these don't always print out)
parser.add_argument('--report', default="report.html", help='filename for a final report of this run. This will be in the logs dir, unless you provide an absolute path.')
parser.add_argument('--conda_prefix', default=None, help='location for conda environment installs')
parser.add_argument('--create_envs_only', action='store_true', help="just install software in conda envs, don't execute workflows")
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('--dag', action='store_true', help='boolean: output a flowchart of the directed acyclic graph of this workflow in graphviz dot format (does not require/run dot)')
parser.add_argument('--dagfile', default=None, help='filename to output a flowchart of the directed acyclic graph of this workflow in graphviz dot format')
parser.add_argument('--dagpng', default=None, help='filename to output a flowchart of the directed acyclic graph of this workflow in png image format')
parser.add_argument('-k', '--keep_going', action='store_true')
parser.add_argument('--nolock', action='store_true')
parser.add_argument('--unlock', action='store_true')
#parser.add_argument('--cleanup_conda', action='store_true')
parser.add_argument('--forcetargets', action='store_true', help='force given targets to be re-created (default False)')
parser.add_argument('--forceall', action='store_true', help='force all output files to be re-created (default False)')
parser.add_argument('--restart_times',type=int, default=0, help='let snakemake try rerunning any failed tools (input number of times to try rerunning). default = 0')
args = parser.parse_args()
thisdir = os.path.abspath(os.path.dirname(__file__))
# print available workflows and rules, if desired
if args.print_workflows or args.print_rules:
pipeline_defaultsFile = find_input_file(os.path.join('utils', 'pipeline_defaults'), 'pipeline_defaults', add_paths = [thisdir])
print_available_workflows_and_tools(read_yaml(pipeline_defaultsFile), args.print_workflows, args.print_rules)
sys.exit(0)
targs = args.targets
if args.print_params:
default_params = build_default_params(thisdir, targs)
write_config(default_params, targs)
sys.exit(0)
# are we building a directed acyclic graph?
building_dag = False
if args.dag or args.dagfile or args.dagpng:
building_dag = True
# building dags precludes running any workflows
args.dry_run = True
# if user specified --dagpng,
# graphviz dot must be present
if args.dagpng:
if shutil.which('dot') is None:
sys.stderr.write(f"\n\tError: Cannot find 'dot' utility, but --dotpng flag was specified. Fix this by installing graphviz dot.\n\n")
sys.exit(-1)
# first, find the Snakefile and configfile
if not building_dag:
print('\n--------')
print('checking for required files:')
print('--------\n')
snakefile = find_Snakefile(thisdir)
if args.build_config:
configfile = args.configfile
if not any(ext in args.configfile for ext in ['.yaml', '.yml']):
configfile = args.configfile + '.yaml'
if os.path.exists(configfile):
sys.stderr.write(f"\n\tError: found configfile path at {configfile}, but you have '--build_config' specified. Please fix.\n\n")
sys.exit(-1)
default_params = build_default_params(thisdir, targs)
write_config(default_params, targs, configfile)
sys.exit(0)
else:
configfile = find_input_file(args.configfile, 'configfile', add_paths=[thisdir], verbose=True) # find configfile
if not configfile:
sys.stderr.write('Error: cannot find configfile {}\n.'.format(args.configfile))
sys.exit(-1)
# first, grab all params in user config file
configD = import_configfile(configfile)
if args.out_path:
if configD.get('out_path'):
sys.stderr.write(f"\n\tWarning: out_path specified both in config and on command line. Choosing command input {out_path}")
configD['out_path'] = args.out_path
if configD.get('workflows', None):
# how do we want to handle the 'default'? Here: If nothing specified, use `default`. If any workflows specified (commandline or config), do not add default.
if targs == ['default']:
targs = configD['workflows']
else:
targs = targs + configD['workflows']
# build info for get_reference
refInput = configD.get('get_reference', None)
if refInput:
targs+=['get_reference']
configD, refinput_ext = handle_reference_input(configD, configfile)
else:
refinput_ext = None
if 'get_reference' in targs and not refInput:
sys.stderr.write("\n\tError: trying to get reference via `get_reference` rule, but there's no reference file specified in your configfile. Please fix.\n\n")
sys.exit(-1)
# check that samples file exists, targs include get_data, and build fullpath to samples file
samples = None
if configD.get('get_data', None):
targs+=['get_data']
try:
configD = handle_samples_input(configD, configfile)
samples, configD = read_samples(configD)
except Exception as e:
sys.stderr.write("\n\tError: trying to get input data, but can't find the samples file. Please fix.\n\n")
print(e)
sys.exit(-1)
targs = list(set(targs))
# next, grab all elvers defaults, including rule-specific default parameters (*_params.yaml files)
paramsD = build_default_params(thisdir, targs)
###############
# Handle additional configuration modification
# 1. extra config files
extra_configs = {}
if args.extra_config:
for c in args.extra_config:
extra_configs = import_configfile(find_input_file(c, 'extra_config', add_paths = [thisdir], verbose =True), extra_configs)
# 2. config_dict passed in on command line
# ADVANCED ONLY - no checks in place, formatting matters. (to do: add checks)
if args.config_dict:
# adding this the same way as an extra config yaml file
update_nested_dict(extra_configs, args.config_dict)
# update_nested_dict only updates keys that already exist. so we need to wait till here | |
tcp --dport %s -j %s' % (self.port, chain_name))
ipt.add_rule('-A %s -d %s -j ACCEPT' % (chain_name, current_ip_with_netmask))
ipt.add_rule('-A %s ! -d %s -j REJECT --reject-with icmp-host-prohibited' % (chain_name, current_ip_with_netmask))
ipt.iptable_restore()
@lock.file_lock('/run/xtables.lock')
def delete(self):
assert self.vm_internal_id is not None
ipt = iptables.from_iptables_save()
chain_name = self._make_chain_name()
ipt.delete_chain(chain_name)
ipt.iptable_restore()
def find_vm_internal_ids(self, vms):
internal_ids = []
namespace_used = is_namespace_used()
for vm in vms:
if namespace_used:
vm_id_node = find_zstack_metadata_node(etree.fromstring(vm.domain_xml), 'internalId')
if vm_id_node is None:
continue
vm_id = vm_id_node.text
else:
if not vm.domain_xmlobject.has_element('metadata.internalId'):
continue
vm_id = vm.domain_xmlobject.metadata.internalId.text_
if vm_id:
internal_ids.append(vm_id)
return internal_ids
@lock.file_lock('/run/xtables.lock')
def delete_stale_chains(self):
ipt = iptables.from_iptables_save()
tbl = ipt.get_table()
if not tbl:
ipt.iptable_restore()
return
vms = get_running_vms()
internal_ids = self.find_vm_internal_ids(vms)
# delete all vnc chains
chains = tbl.children[:]
for chain in chains:
if 'vm' in chain.name and 'vnc' in chain.name:
vm_internal_id = chain.name.split('-')[1]
if vm_internal_id not in internal_ids:
ipt.delete_chain(chain.name)
logger.debug('deleted a stale VNC iptable chain[%s]' % chain.name)
ipt.iptable_restore()
def e(parent, tag, value=None, attrib={}, usenamesapce = False):
if usenamesapce:
tag = '{%s}%s' % (ZS_XML_NAMESPACE, tag)
el = etree.SubElement(parent, tag, attrib)
if value:
el.text = value
return el
def find_namespace_node(root, path, name):
ns = {'zs': ZS_XML_NAMESPACE}
ps = path.split('.')
cnode = root
for p in ps:
cnode = cnode.find(p)
if cnode is None:
return None
return cnode.find('zs:%s' % name, ns)
def find_zstack_metadata_node(root, name):
zs = find_namespace_node(root, 'metadata', 'zstack')
if zs is None:
return None
return zs.find(name)
def find_domain_cdrom_address(domain_xml, target_dev):
domain_xmlobject = xmlobject.loads(domain_xml)
disks = domain_xmlobject.devices.get_children_nodes()['disk']
for d in disks:
if d.device_ != 'cdrom':
continue
if d.get_child_node('target').dev_ != target_dev:
continue
return d.get_child_node('address')
return None
def find_domain_first_boot_device(domain_xml):
domain_xmlobject = xmlobject.loads(domain_xml)
disks = domain_xmlobject.devices.get_child_node_as_list('disk')
ifaces = domain_xmlobject.devices.get_child_node_as_list('interface')
for d in disks:
if d.get_child_node('boot') is None:
continue
if d.device_ == 'disk' and d.get_child_node('boot').order_ == '1':
return "HardDisk"
if d.device_ == 'cdrom' and d.get_child_node('boot').order_ == '1':
return "CdRom"
for i in ifaces:
if i.get_child_node('boot') is None:
continue
if i.get_child_node('boot').order_ == '1':
return "Network"
devs = domain_xmlobject.os.get_child_node_as_list('boot')
if devs and devs[0].dev_ == 'cdrom':
return "CdRom"
return "HardDisk"
def compare_version(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
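    # NOTE: relies on the Python 2 builtin cmp(); the result is -1, 0 or 1.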
return cmp(normalize(version1), normalize(version2))
LIBVIRT_VERSION = linux.get_libvirt_version()
LIBVIRT_MAJOR_VERSION = LIBVIRT_VERSION.split('.')[0]
QEMU_VERSION = linux.get_qemu_version()
def is_namespace_used():
return compare_version(LIBVIRT_VERSION, '1.3.3') >= 0
def is_hv_freq_supported():
return compare_version(QEMU_VERSION, '2.12.0') >= 0
@linux.with_arch(todo_list=['x86_64'])
def is_ioapic_supported():
return compare_version(LIBVIRT_VERSION, '3.4.0') >= 0
def is_kylin402():
zstack_release = linux.read_file('/etc/zstack-release')
if zstack_release is None:
return False
return "kylin402" in zstack_release.splitlines()[0]
def is_spiceport_driver_supported():
# qemu-system-aarch64 not supported char driver: spiceport
return shell.run("%s -h | grep 'chardev spiceport'" % kvmagent.get_qemu_path()) == 0
def is_virtual_machine():
product_name = shell.call("dmidecode -s system-product-name").strip()
return product_name == "KVM Virtual Machine" or product_name == "KVM"
def get_domain_type():
return "qemu" if HOST_ARCH == "aarch64" and is_virtual_machine() else "kvm"
def get_gic_version(cpu_num):
kernel_release = platform.release().split("-")[0]
if is_kylin402() and cpu_num <= 8 and LooseVersion(kernel_release) < LooseVersion('4.15.0'):
return 2
# Occasionally, libvirt might fail to list VM ...
def get_console_without_libvirt(vmUuid):
output = bash.bash_o("""ps x | awk '/qemu[-]kvm.*%s/{print $1, index($0, " -vnc ")}'""" % vmUuid).splitlines()
if len(output) != 1:
return None, None, None, None
pid, idx = output[0].split()
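# idx is the character position of " -vnc " in the qemu command line; awk's index()
# returns 0 when the substring is absent, i.e. the VM has no VNC display.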
output = bash.bash_o(
"""lsof -p %s -aPi4 | awk '$8 == "TCP" { n=split($9,a,":"); print a[n] }'""" % pid).splitlines()
if len(output) < 1:
logger.warn("get_port_without_libvirt: no port found")
return None, None, None, None
# VNC uses a single port; SPICE may use one or two ports; VNC+SPICE may expose two or three ports.
output.sort()
if len(output) == 1 and int(idx) == 0:
protocol = "spice"
return protocol, None, int(output[0]), None
if len(output) == 1 and int(idx) != 0:
protocol = "vnc"
return protocol, int(output[0]), None, None
if len(output) == 2 and int(idx) == 0:
protocol = "spice"
return protocol, None, int(output[0]), int(output[1])
if len(output) == 2 and int(idx) != 0:
protocol = "vncAndSpice"
return protocol, int(output[0]), int(output[1]), None
if len(output) == 3:
protocol = "vncAndSpice"
return protocol, int(output[0]), int(output[1]), int(output[2])
logger.warn("get_port_without_libvirt: more than 3 ports")
return None, None, None, None
def check_vdi_port(vncPort, spicePort, spiceTlsPort):
if vncPort is None and spicePort is None and spiceTlsPort is None:
return False
if vncPort is not None and vncPort <= 0:
return False
if spicePort is not None and spicePort <= 0:
return False
if spiceTlsPort is not None and spiceTlsPort <= 0:
return False
return True
# get domain/bus/slot/function from pci device address
def parse_pci_device_address(addr):
domain = '0000' if len(addr.split(":")) == 2 else addr.split(":")[0]
bus = addr.split(":")[-2]
slot = addr.split(":")[-1].split(".")[0]
function = addr.split(".")[-1]
return domain, bus, slot, function
def get_machineType(machine_type):
if HOST_ARCH == "aarch64":
return "virt"
return machine_type if machine_type else "pc"
def get_sgio_value():
device_name = [x for x in os.listdir("/sys/block") if not x.startswith("loop")][0]
return "unfiltered" if os.path.isfile("/sys/block/{}/queue/unpriv_sgio".format(device_name)) else "filtered"
class LibvirtAutoReconnect(object):
conn = libvirt.open('qemu:///system')
if not conn:
raise Exception('unable to get libvirt connection')
evtMgr = LibvirtEventManagerSingleton()
libvirt_event_callbacks = {}
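# NOTE: the libvirt connection and event manager above are class-level singletons,
# shared by every function wrapped with LibvirtAutoReconnect.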
def __init__(self, func):
self.func = func
self.exception = None
@staticmethod
def add_libvirt_callback(id, cb):
cbs = LibvirtAutoReconnect.libvirt_event_callbacks.get(id, None)
if cbs is None:
cbs = []
LibvirtAutoReconnect.libvirt_event_callbacks[id] = cbs
cbs.append(cb)
@staticmethod
def register_libvirt_callbacks():
def reboot_callback(conn, dom, opaque):
cbs = LibvirtAutoReconnect.libvirt_event_callbacks.get(libvirt.VIR_DOMAIN_EVENT_ID_REBOOT)
if not cbs:
return
for cb in cbs:
try:
cb(conn, dom, opaque)
except:
content = traceback.format_exc()
logger.warn(content)
LibvirtAutoReconnect.conn.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, reboot_callback,
None)
def lifecycle_callback(conn, dom, event, detail, opaque):
cbs = LibvirtAutoReconnect.libvirt_event_callbacks.get(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE)
if not cbs:
return
for cb in cbs:
try:
cb(conn, dom, event, detail, opaque)
except:
content = traceback.format_exc()
logger.warn(content)
LibvirtAutoReconnect.conn.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
lifecycle_callback, None)
def libvirtClosedCallback(conn, reason, opaque):
reasonStrings = (
"Error", "End-of-file", "Keepalive", "Client",
)
logger.debug("got libvirt closed callback: %s: %s" % (conn.getURI(), reasonStrings[reason]))
LibvirtAutoReconnect.conn.registerCloseCallback(libvirtClosedCallback, None)
# NOTE: the keepalive doesn't work on some libvirtd even the versions are the same
# the error is like "the caller doesn't support keepalive protocol; perhaps it's missing event loop implementation"
# def start_keep_alive(_):
# try:
# LibvirtAutoReconnect.conn.setKeepAlive(5, 3)
# return True
# except Exception as e:
# logger.warn('unable to start libvirt keep-alive, %s' % str(e))
# return False
#
# if not linux.wait_callback_success(start_keep_alive, timeout=5, interval=0.5):
# raise Exception('unable to start libvirt keep-alive after 5 seconds, see the log for detailed error')
@lock.lock('libvirt-reconnect')
def _reconnect(self):
def test_connection():
try:
LibvirtAutoReconnect.conn.getLibVersion()
VmPlugin._reload_ceph_secret_keys()
return None
except libvirt.libvirtError as ex:
return ex
ex = test_connection()
if not ex:
# the connection is ok
return
# 2nd version: 2015
logger.warn("the libvirt connection is broken, there is no safeway to auto-reconnect without fd leak, we"
" will ask the mgmt server to reconnect us after self quit")
_stop_world()
# old_conn = LibvirtAutoReconnect.conn
# LibvirtAutoReconnect.conn = libvirt.open('qemu:///system')
# if not LibvirtAutoReconnect.conn:
# raise Exception('unable to get a libvirt connection')
#
# for cid in LibvirtAutoReconnect.callback_id:
# logger.debug("remove libvirt event callback[id:%s]" % cid)
# old_conn.domainEventDeregisterAny(cid)
#
# # stop old event manager
# LibvirtAutoReconnect.evtMgr.stop()
# # create a new event manager
# LibvirtAutoReconnect.evtMgr = LibvirtEventManager()
# LibvirtAutoReconnect.register_libvirt_callbacks()
#
# # try to close the old connection anyway
# try:
# old_conn.close()
# except Exception as ee:
# logger.warn('unable to close an old libvirt exception, %s' % str(ee))
# finally:
# del old_conn
#
# ex = test_connection()
# if ex:
# # unable to reconnect, raise the error
# raise Exception('unable to get a libvirt connection, %s' % str(ex))
#
# logger.debug('successfully reconnected to the libvirt')
def __call__(self, *args, **kwargs):
try:
return self.func(LibvirtAutoReconnect.conn)
except libvirt.libvirtError as ex:
err = str(ex)
if 'client socket is closed' in err or 'Broken pipe' in err or 'invalid connection' in err:
logger.debug('socket to the libvirt is broken[%s], try reconnecting' % err)
self._reconnect()
return self.func(LibvirtAutoReconnect.conn)
else:
raise
class IscsiLogin(object):
def __init__(self):
self.server_hostname = None
self.server_port = None
self.target = None
self.chap_username = None
self.chap_password = None
self.lun = 1
@lock.lock('iscsiadm')
def login(self):
assert self.server_hostname, "hostname cannot be None"
assert self.server_port, "port cannot be None"
assert self.target, "target cannot be None"
device_path = os.path.join('/dev/disk/by-path/', 'ip-%s:%s-iscsi-%s-lun-%s' % (
self.server_hostname, self.server_port, self.target, self.lun))
shell.call('iscsiadm -m discovery -t sendtargets -p %s:%s' % (self.server_hostname, self.server_port))
if self.chap_username and self.chap_password:
shell.call(
'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.authmethod --value=CHAP' % (
self.target, self.server_hostname, self.server_port))
shell.call(
'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.username --value=%s' % (
self.target, self.server_hostname, self.server_port, self.chap_username))
shell.call(
'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.password --value=%s' % (
self.target, self.server_hostname, self.server_port, self.chap_password))
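def _example_iscsi_login():
    # Hedged usage sketch (not part of the original agent code): configure the
    # IscsiLogin helper above before calling login(). The host, port, target IQN and
    # CHAP credentials are made-up placeholders; running login() for real needs a
    # reachable target and root privileges, since it shells out to iscsiadm.
    login = IscsiLogin()
    login.server_hostname = '10.0.0.5'
    login.server_port = 3260
    login.target = 'iqn.2017-01.com.example:storage'
    login.chap_username = 'chapuser'
    login.chap_password = 'chapsecret'
    return login.login()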
3: [
# vertices
(0, 0), (0, 3), (3, 3), (3, 0),
# edges
(1, 0), (2, 0),
(3, 1), (3, 2),
(1, 3), (2, 3),
(0, 1), (0, 2),
# volume
(1, 1), (2, 1),
(1, 2), (2, 2),
],
}[self.order]
class GmshHexahedralElement(GmshTensorProductElementBase):
dimensions = 3
@memoize_method
def gmsh_node_tuples(self):
# gmsh's node ordering is always on crack
# obtained by using the files in
# contrib/extract-gmsh-node-order
# with gmsh 2.7.1
return {
1: [
(0, 0, 0), (0, 1, 0), (1, 1, 0), (1, 0, 0),
(0, 0, 1), (0, 1, 1), (1, 1, 1), (1, 0, 1),
],
2: [
(0, 0, 0), (0, 0, 2), (2, 0, 2), (2, 0, 0),
(0, 2, 0), (0, 2, 2), (2, 2, 2), (2, 2, 0),
(0, 0, 1), (1, 0, 0), (0, 1, 0), (1, 0, 2),
(0, 1, 2), (2, 0, 1), (2, 1, 2), (2, 1, 0),
(0, 2, 1), (1, 2, 0), (1, 2, 2), (2, 2, 1),
(1, 0, 1), (0, 1, 1), (1, 1, 0), (1, 1, 2),
(2, 1, 1), (1, 2, 1), (1, 1, 1),
],
3: [
(0, 3, 3), (3, 3, 3), (3, 0, 3), (0, 0, 3),
(0, 3, 0), (3, 3, 0), (3, 0, 0), (0, 0, 0),
(1, 3, 3), (2, 3, 3), (0, 2, 3), (0, 1, 3),
(0, 3, 2), (0, 3, 1), (3, 2, 3), (3, 1, 3),
(3, 3, 2), (3, 3, 1), (2, 0, 3), (1, 0, 3),
(3, 0, 2), (3, 0, 1), (0, 0, 2), (0, 0, 1),
(1, 3, 0), (2, 3, 0), (0, 2, 0), (0, 1, 0),
(3, 2, 0), (3, 1, 0), (2, 0, 0), (1, 0, 0),
(1, 2, 3), (1, 1, 3), (2, 1, 3), (2, 2, 3),
(1, 3, 2), (2, 3, 2), (2, 3, 1), (1, 3, 1),
(0, 2, 2), (0, 2, 1), (0, 1, 1), (0, 1, 2),
(3, 2, 2), (3, 1, 2), (3, 1, 1), (3, 2, 1),
(2, 0, 2), (1, 0, 2), (1, 0, 1), (2, 0, 1),
(1, 2, 0), (2, 2, 0), (2, 1, 0), (1, 1, 0),
(1, 2, 2), (2, 2, 2), (2, 1, 2), (1, 1, 2),
(1, 2, 1), (2, 2, 1), (2, 1, 1), (1, 1, 1),
],
4: [
(4, 0, 0), (4, 4, 0), (0, 4, 0), (0, 0, 0),
(4, 0, 4), (4, 4, 4), (0, 4, 4), (0, 0, 4),
(4, 1, 0), (4, 2, 0), (4, 3, 0), (3, 0, 0),
(2, 0, 0), (1, 0, 0), (4, 0, 1), (4, 0, 2),
(4, 0, 3), (3, 4, 0), (2, 4, 0), (1, 4, 0),
(4, 4, 1), (4, 4, 2), (4, 4, 3), (0, 3, 0),
(0, 2, 0), (0, 1, 0), (0, 4, 1), (0, 4, 2),
(0, 4, 3), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(4, 1, 4), (4, 2, 4), (4, 3, 4), (3, 0, 4),
(2, 0, 4), (1, 0, 4), (3, 4, 4), (2, 4, 4),
(1, 4, 4), (0, 3, 4), (0, 2, 4), (0, 1, 4),
(3, 1, 0), (1, 1, 0), (1, 3, 0), (3, 3, 0),
(2, 1, 0), (1, 2, 0), (2, 3, 0), (3, 2, 0),
(2, 2, 0), (4, 1, 1), (4, 3, 1), (4, 3, 3),
(4, 1, 3), (4, 2, 1), (4, 3, 2), (4, 2, 3),
(4, 1, 2), (4, 2, 2), (3, 0, 1), (3, 0, 3),
(1, 0, 3), (1, 0, 1), (3, 0, 2), (2, 0, 3),
(1, 0, 2), (2, 0, 1), (2, 0, 2), (3, 4, 1),
(1, 4, 1), (1, 4, 3), (3, 4, 3), (2, 4, 1),
(1, 4, 2), (2, 4, 3), (3, 4, 2), (2, 4, 2),
(0, 3, 1), (0, 1, 1), (0, 1, 3), (0, 3, 3),
(0, 2, 1), (0, 1, 2), (0, 2, 3), (0, 3, 2),
(0, 2, 2), (3, 1, 4), (3, 3, 4), (1, 3, 4),
(1, 1, 4), (3, 2, 4), (2, 3, 4), (1, 2, 4),
(2, 1, 4), (2, 2, 4), (3, 1, 1), (3, 3, 1),
(1, 3, 1), (1, 1, 1), (3, 1, 3), (3, 3, 3),
(1, 3, 3), (1, 1, 3), (3, 2, 1), (2, 1, 1),
(3, 1, 2), (2, 3, 1), (3, 3, 2), (1, 2, 1),
(1, 3, 2), (1, 1, 2), (3, 2, 3), (2, 1, 3),
(2, 3, 3), (1, 2, 3), (2, 2, 1), (3, 2, 2),
(2, 1, 2), (2, 3, 2), (1, 2, 2), (2, 2, 3),
(2, 2, 2),
]
}[self.order]
# }}}
# }}}
# {{{ receiver interface
class GmshMeshReceiverBase(object):
"""
.. attribute:: gmsh_element_type_to_info_map
.. automethod:: set_up_nodes
.. automethod:: add_node
.. automethod:: finalize_nodes
.. automethod:: set_up_elements
.. automethod:: add_element
.. automethod:: finalize_elements
.. automethod:: add_tag
.. automethod:: finalize_tags
"""
gmsh_element_type_to_info_map = {
1: GmshIntervalElement(1),
2: GmshTriangularElement(1),
3: GmshQuadrilateralElement(1),
4: GmshTetrahedralElement(1),
5: GmshHexahedralElement(1),
8: GmshIntervalElement(2),
9: GmshTriangularElement(2),
10: GmshQuadrilateralElement(2),
11: GmshTetrahedralElement(2),
12: GmshHexahedralElement(2),
15: GmshPoint(0),
20: GmshIncompleteTriangularElement(3),
21: GmshTriangularElement(3),
22: GmshIncompleteTriangularElement(4),
23: GmshTriangularElement(4),
24: GmshIncompleteTriangularElement(5),
25: GmshTriangularElement(5),
26: GmshIntervalElement(3),
27: GmshIntervalElement(4),
28: GmshIntervalElement(5),
29: GmshTetrahedralElement(3),
30: GmshTetrahedralElement(4),
31: GmshTetrahedralElement(5),
92: GmshHexahedralElement(3),
93: GmshHexahedralElement(4),
}
def set_up_nodes(self, count):
pass
def add_node(self, node_nr, point):
pass
def finalize_nodes(self):
pass
def set_up_elements(self, count):
pass
def add_element(self, element_nr, element_type, vertex_nrs,
lexicographic_nodes, tag_numbers):
pass
def finalize_elements(self):
pass
def add_tag(self, name, index, dimension):
pass
def finalize_tags(self):
pass
# }}}
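# Hedged sketch (not part of the module): a minimal receiver that only counts nodes and
# elements, illustrating the callback interface documented on GmshMeshReceiverBase.
class _CountingGmshReceiver(GmshMeshReceiverBase):
    def __init__(self):
        self.node_count = 0
        self.element_count = 0

    def add_node(self, node_nr, point):
        self.node_count += 1

    def add_element(self, element_nr, element_type, vertex_nrs,
            lexicographic_nodes, tag_numbers):
        self.element_count += 1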
# {{{ receiver example
class GmshMeshReceiverNumPy(GmshMeshReceiverBase):
"""GmshReceiver that emulates the semantics of
:class:`meshpy.triangle.MeshInfo` and :class:`meshpy.tet.MeshInfo` by using
similar fields, but instead of loading data into ForeignArrays, load into
NumPy arrays. Since this class is not wrapping any libraries in other
languages -- the Gmsh data is obtained via parsing text -- use :mod:`numpy`
arrays as the base array data structure for convenience.
.. versionadded:: 2014.1
"""
def __init__(self):
# Use data fields similar to meshpy.triangle.MeshInfo and
# meshpy.tet.MeshInfo
self.points = None
self.elements = None
self.element_types = None
self.element_markers = None
self.tags = None
# Gmsh has no explicit concept of facets or faces; certain faces are a type
# of element. Consequently, there are no face markers, but elements can be
# grouped together in physical groups that serve as markers.
def set_up_nodes(self, count):
# Preallocate array of nodes within list; treat None as sentinel value.
# Preallocation not done for performance, but to assign values at indices
# in random order.
self.points = [None] * count
def add_node(self, node_nr, point):
self.points[node_nr] = point
def finalize_nodes(self):
pass
def set_up_elements(self, count):
# Preallocate arrays so elements can be assigned at indices in random order.
self.elements = [None] * count
self.element_types = [None] * count
self.element_markers = [None] * count
self.tags = []
def add_element(self, element_nr, element_type, vertex_nrs,
lexicographic_nodes, tag_numbers):
self.elements[element_nr] = vertex_nrs
self.element_types[element_nr] = element_type
self.element_markers[element_nr] = tag_numbers
# TODO: Add lexicographic node information
def finalize_elements(self):
pass
def add_tag(self, name, index, dimension):
self.tags.append((name, index, dimension))
def finalize_tags(self):
pass
# }}}
# {{{ file reader
class GmshFileFormatError(RuntimeError):
pass
def read_gmsh(receiver, filename, force_dimension=None):
"""Read a gmsh mesh file from *filename* and feed it to *receiver*.
:param receiver: Implements the :class:`GmshMeshReceiverBase` interface.
:param force_dimension: if not None, truncate point coordinates to
this many dimensions.
"""
mesh_file = open(filename, 'rt')
try:
result = parse_gmsh(receiver, mesh_file, force_dimension=force_dimension)
finally:
mesh_file.close()
return result
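def _example_read_gmsh():
    # Hedged usage sketch: feed a Gmsh MSH file into the NumPy receiver defined above;
    # "mesh.msh" is a placeholder path, not a file shipped with this module.
    receiver = GmshMeshReceiverNumPy()
    read_gmsh(receiver, "mesh.msh", force_dimension=2)
    return receiver.points, receiver.elements, receiver.element_types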
def generate_gmsh(receiver, source, dimensions=None, order=None, other_options=[],
extension="geo", gmsh_executable="gmsh", force_dimension=None,
output_file_name="output.msh"):
"""Run gmsh and feed the output to *receiver*.
:arg source: an instance of :class:`LiteralSource` or :class:`FileSource`
:param receiver: Implements the :class:`GmshMeshReceiverBase` interface.
"""
from meshpy.gmsh import GmshRunner
runner = GmshRunner(source, dimensions, order=order,
other_options=other_options, extension=extension,
gmsh_executable=gmsh_executable,
output_file_name=output_file_name)
runner.__enter__()
try:
result = parse_gmsh(receiver, runner.output_file,
force_dimension=force_dimension)
finally:
runner.__exit__(None, None, None)
return result
def parse_gmsh(receiver, line_iterable, force_dimension=None):
"""
:arg source: an instance of :class:`LiteralSource` or :class:`FileSource`
:arg receiver: This object will be fed the entities encountered in reading the
GMSH file. See :class:`GmshMeshReceiverBase` for the interface this
object needs to conform to.
:param force_dimension: if not None, truncate point coordinates to
this many dimensions.
"""
# tests/test_entity_engine.py (from the suryaavala/zen_search repository)
import json
import os
import unittest
import pytest
from zensearch.entity_engine import Entity
from zensearch.exceptions import DuplicatePrimaryKeyError, PrimaryKeyNotFoundError
def write_to_file(content, file_name):
with open(file_name, "w") as f:
f.write(str(content))
return
def get_entity_with_data_indices(entity_name):
"""Instantiates and returns an Entity object of entity_name after loading data (from
inferred test data file) and building _indices
Args:
entity_name (str): One of user, organization, ticket
Returns:
Entity(): entity object of name entity_name, with test data loaded and indices built
"""
data_file_name = f"{os.path.dirname(os.path.abspath(__file__))}/test_data/test_data_import_{entity_name}s.json"
entity = Entity(entity_name)
entity.load_data_build_indices(data_file_name)
return entity
def get_all_entities(entity_names=["user", "ticket", "organization"]):
entities = {
entity_name: get_entity_with_data_indices(entity_name)
for entity_name in entity_names
}
return entities
def get_entity_from_formatted_data(entity_name, data):
entity = Entity(entity_name)
entity.load_data_build_indices(data)
return entity
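def _example_get_by_primary_keys():
    # Hedged sketch (hypothetical helper, not used by the tests): build an Entity from
    # inline records via the helper above and fetch records by primary key, using only
    # methods exercised in the tests below.
    users = [{"_id": 1, "name": "one"}, {"_id": 2, "name": "two"}]
    entity = get_entity_from_formatted_data("user", users)
    return list(entity.get_data_from_primary_keys([1, 2]))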
class TestEntityEngine:
def test_entity_struct(self):
"""Test to see if Entity instantiates with
a primary key
at least an index on the primary key
_build_indices
load_data_build_indices
search
"""
entity = Entity("user")
assert entity.primary_key == "_id"
assert entity._indices == {"_id": {}}
assert entity._data == []
assert hasattr(entity, "_build_indices")
assert hasattr(entity, "load_data_build_indices")
assert hasattr(entity, "search")
class TestEntityEngineLoadData:
def test_entity_invalid_file(self):
"""Test for a FileNotFoundError when an empty string or invalid path to file is given
"""
entity = Entity("user")
invalid_files = ["a", "nofile.txt", "{}", "set", "True", "None"]
for invalid_file_name in invalid_files:
with pytest.raises(FileNotFoundError) as error:
entity.load_data_build_indices(invalid_file_name)
assert "[Errno 2] No such file or directory:" in str(error.value)
def test_entity_invalid_json_structure(self, tmpdir):
"""Invalid json in any of the entity files should throw a Json Decode Error
"""
for invalid_json in ["{", "[}]", '{"_id":1 "2":2}', "", " ", "[", "nothing"]:
tmp_file_name = f"{tmpdir}/invalid_json.json"
write_to_file(invalid_json, tmp_file_name)
entity = Entity("user")
with pytest.raises(ValueError):
entity.load_data_build_indices(tmp_file_name)
assert True
def test_entity_missing_mandatory_key(self, tmpdir):
"""Missing '_id' in ANY data point should throw a PrimaryKeyNotFoundError
"""
for empty_data in [
"{}",
"[{}]",
json.dumps({"url": "https://test.com"}),
json.dumps([{"_id": 1}, {"url": "https://test.com"}]),
]:
tmp_file_name = f"{tmpdir}/missing_id.json"
write_to_file(empty_data, tmp_file_name)
entity = Entity("user")
with pytest.raises(PrimaryKeyNotFoundError) as error:
entity.load_data_build_indices(tmp_file_name)
assert "Cannot find _id in the data point:" in str(error.value)
assert True
def test_entity_valid_data_in_file(self, tmpdir):
"""Testing with valid data should result in expected output, empty data [] should result in empty index
{} is not valid as it doesn't have the primary key in it
"""
test_io = {
"[]": {"_id": {}},
'{"_id": 1}': {"_id": {"1": {"_id": 1}}},
'[{"_id": 1}]': {"_id": {"1": {"_id": 1}}},
'[{"_id": 1, "d": 2}]': {"_id": {"1": {"_id": 1, "d": 2}}, "d": {2: [1]}},
}
for in_data in test_io:
tmp_file_name = f"{tmpdir}/invalid_json.json"
write_to_file(in_data, tmp_file_name)
entity = Entity("user")
entity.load_data_build_indices(tmp_file_name)
assert test_io[in_data] == entity._indices
assert True
def test_entity_valid_data_no_file(self, tmpdir):
"""Testing with valid data should result in expected output, empty data [] should result in empty index
{} is not valid as it doesn't have the primary key in it
"""
test_in_data = [[], {"_id": 1}, [{"_id": 1}], [{"_id": 1, "d": 2}]]
test_out_data = [
{"_id": {}},
{"_id": {"1": {"_id": 1}}},
{"_id": {"1": {"_id": 1}}},
{"_id": {"1": {"_id": 1, "d": 2}}, "d": {2: [1]}},
]
for inp, out in zip(test_in_data, test_out_data):
entity = Entity("user")
entity.load_data_build_indices(inp)
assert out == entity._indices
assert True
def test_custom_primary_key(self, tmpdir):
"""Custom primary key should use the given custom primary key
"""
tmp_file_name = f"{tmpdir}/custom_prim_key.json"
test_data = '[{"cid": 1}]'
test_primary_key = "cid"
expected_index = {"cid": {"1": {"cid": 1}}}
write_to_file(test_data, tmp_file_name)
entity = Entity("user", "cid")
entity.load_data_build_indices(tmp_file_name)
assert test_primary_key == entity.primary_key
assert expected_index == entity._indices
def test_build_load_invalid_data_type(self):
"""Valid data = [], [{"primary_key": }], 'path/to/file'
Invalid data should throw a value error
"""
invalid_input_data = [1, {1}, (), True, None, Entity("user")]
for invalid_data_point in invalid_input_data:
entity = Entity("ticket")
with pytest.raises(TypeError) as error:
entity.load_data_build_indices(invalid_data_point)
assert (
"Data to load should be one of file path as str(), data point as dict() or data as list of data point()"
== str(error.value)
)
assert True
class TestEntityEngineBuildIndices:
def test_build_index_missing_primary_key(self):
"""Missing primary key should throw an error
"""
no_pkey_data = [[{}], [{"url": "https://test.com"}]]
for no_pkey in no_pkey_data:
entity = Entity("ticket")
with pytest.raises(PrimaryKeyNotFoundError):
entity.load_data_build_indices(no_pkey)
assert True
def test_build_index_valid_data(self):
"""Valid data should return valid _indices
if the data is
- [] it should result in vanilla index
"""
test_ticket_in_data = [
[],
[{"_id": 1, "name": "surya"}],
[{"_id": 1, "name": "surya"}, {"_id": 2, "name": "surya"}],
[
{
"_id": "436bf9b0-1147-4c0a-8439-6f79833bff5b",
"url": "http://initech.zendesk.com/api/v2/tickets/436bf9b0-1147-4c0a-8439-6f79833bff5b.json",
"external_id": "9210cdc9-4bee-485f-a078-35396cd74063",
}
],
]
test_ticket_out_data = [
{"_id": {}},
{"_id": {"1": {"_id": 1, "name": "surya"}}, "name": {"surya": [1]}},
{
"_id": {
"1": {"_id": 1, "name": "surya"},
"2": {"_id": 2, "name": "surya"},
},
"name": {"surya": [1, 2]},
},
{
"_id": {
"436bf9b0-1147-4c0a-8439-6f79833bff5b": {
"_id": "436bf9b0-1147-4c0a-8439-6f79833bff5b",
"url": "http://initech.zendesk.com/api/v2/tickets/436bf9b0-1147-4c0a-8439-6f79833bff5b.json",
"external_id": "9210cdc9-4bee-485f-a078-35396cd74063",
},
},
"url": {
"http://initech.zendesk.com/api/v2/tickets/436bf9b0-1147-4c0a-8439-6f79833bff5b.json": [
"436bf9b0-1147-4c0a-8439-6f79833bff5b"
]
},
"external_id": {
"9210cdc9-4bee-485f-a078-35396cd74063": [
"436bf9b0-1147-4c0a-8439-6f79833bff5b"
]
},
},
]
for inp, out in zip(test_ticket_in_data, test_ticket_out_data):
entity = Entity("ticket")
entity.load_data_build_indices(inp)
assert out == entity._indices
assert True
def test_build_index_blank_values(self):
"""Testing for corner cases, empty strings, spaces, empty lists as values in data fields
"""
test_in_data = [
[{"_id": ""}],
[{"_id": " "}],
[{"_id": 1, "tags": []}],
[{"_id": "", "name": "surya"}],
]
test_out_data = [
{"_id": {"": {"_id": ""}}},
{"_id": {" ": {"_id": " "}}},
{"_id": {"1": {"_id": 1, "tags": []}}, "tags": {"": [1]}},
{"_id": {"": {"_id": "", "name": "surya"}}, "name": {"surya": [""]}},
]
for inp, out in zip(test_in_data, test_out_data):
entity = Entity("organization")
entity.load_data_build_indices(inp)
assert out == entity._indices
assert True
def test_build_index_tags(self):
"""Test that when the data point has values that are a list we flatten them
"""
test_in_data = [
[{"_id": 1, "tags": ["tag1", "tag2"]}],
[{"_id": 1, "tags": []}],
]
test_out_data = [
{
"_id": {"1": {"_id": 1, "tags": ["tag1", "tag2"]}},
"tags": {"tag1": [1], "tag2": [1]},
},
{"_id": {"1": {"_id": 1, "tags": []}}, "tags": {"": [1]}},
]
for inp, out in zip(test_in_data, test_out_data):
entity = Entity("ticket")
entity.load_data_build_indices(inp)
assert out == entity._indices
assert True
def test_build_index_unhashable(self):
"""Unhashable values in data point's fields should throw TypeErrors
"""
test_in_data = [
[{"_id": 1, "unhash": set()}],
[{"_id": 1, "tags": {}}],
]
for inp in test_in_data:
entity = Entity("ticket")
with pytest.raises(TypeError) as error:
entity.load_data_build_indices(inp)
assert "Unhashable value" in str(error.value)
assert True
def test_build_pkey_index_unhashable(self):
"""Unhashable values in data point's primary key index should not throw TypeErrors as they are being stringified
"""
test_in_data = [
[{"_id": {1: 1}}],
[{"_id": {1}}],
[{"_id": [1]}],
]
test_out_data = [
{"_id": {"{1: 1}": {"_id": {1: 1}}}},
{"_id": {"{1}": {"_id": {1}}}},
{"_id": {"[1]": {"_id": [1]}}},
]
for inp, out in zip(test_in_data, test_out_data):
entity = Entity("ticket")
print(inp)
entity.load_data_build_indices(inp)
assert entity._indices == out
assert True
def test_duplicate_primary_key(self):
"""Duplicate primary keys throw an error
"""
test_in_data = [{"_id": 1}, {"_id": 1}]
for dup in test_in_data:
entity = Entity("user")
with pytest.raises(DuplicatePrimaryKeyError) as error:
entity.load_data_build_indices(test_in_data)
assert "Duplicate primary key value: " in str(error.value)
assert True
class TestEntityEngineDataFromPrimaryKeys:
def test_entity_match_list_primary_keys(self):
users = [
{"_id": 1, "name": "one"},
{"_id": 2, "name": "two"},
{"_id": 3, "name": "three"},
]
entity = get_entity_from_formatted_data("user", users)
assert users == list(entity.get_data_from_primary_keys([1, 2, 3]))
assert users[:-1] == list(entity.get_data_from_primary_keys([1, 2]))
def test_entity_no_match_list_primary_keys(self):
users = [
{"_id": 1, "name": "one"},
{"_id": 2, "name": "two"},
{"_id": 3, "name": "three"},
]
entity = get_entity_from_formatted_data("user", users)
assert [] == list(entity.get_data_from_primary_keys([0, -1, 99, "test", "11"]))
def test_entity_match_single_primary_keys(self):
users = [
{"_id": 1, "name": "one"},
{"_id": 2, "name": "two"},
{"_id": 3, "name": "three"},
]
entity = get_entity_from_formatted_data("user", users)
assert [users[0]] == list(entity.get_data_from_primary_keys(1))
assert [users[-1]] == list(entity.get_data_from_primary_keys(3))
def test_entity_no_match_single_primary_keys(self):
users = [
{"_id": 1, "name": "one"},
{"_id": 2, "name": "two"},
{"_id": 3, "name": "three"},
]
entity = get_entity_from_formatted_data("user", users)
assert [] == list(entity.get_data_from_primary_keys(0))
def test_entity_match_python_equality_paradox(self):
"""1.00 is treated the same as 1.0 in Python, so having both of them in the data would raise an error
TODO: Should we support this functionality
"""
users = [
{"_id": 1, "name": "one"},
{"_id": 1.0, "name": "two"},
{"_id": True, "name": "three"},
{"_id": 1.01, "name": "four"},
]
same = [1, 1.0, True, 1.01]
entity = get_entity_from_formatted_data("user", users)
for search_term, expected_out in zip(same, users):
print(search_term)
assert [expected_out] == list(
entity.get_data_from_primary_keys(search_term)
)
assert True
def test_entity_match_get_empty(self):
users = [
{"_id": 1, "name": "one"},
{"_id": 2, "name": "two"},
{"_id": 3, "name": "three"},
]
entity = get_entity_from_formatted_data("user", users)
assert [] == list(entity.get_data_from_primary_keys([]))
assert [] == list(entity.get_data_from_primary_keys([None]))
assert [] == list(entity.get_data_from_primary_keys(""))
assert [] == list(entity.get_data_from_primary_keys(None))
def test_entity_ground_data_integrity(self):
users = [
{"_id": 1, "name": "one"},
{"_id": 2, "name": "two"},
{"_id": 3, "name": "three"},
]
entity = get_entity_from_formatted_data("user", users)
from __future__ import annotations
import ctypes as ct
import os
from contextlib import contextmanager
from sys import executable as _python_interpretor
from typing import List
try:
# for use from outside the package, as a python package
from .pyomexmeta_api import PyOmexMetaAPI, eUriType, eXmlType, OmexMetaException
except ImportError:
try: # for internal use
from pyomexmeta_api import PyOmexMetaAPI, eUriType, eXmlType, OmexMetaException
except ImportError:
# for internal use
from . import pyomexmeta_api, eUriType, eXmlType, OmexMetaException
_pyom = PyOmexMetaAPI()
# expose get_last_error at top level so easily importable to other modules
_get_last_error = _pyom.get_last_error
def propagate_omexmeta_error(func):
"""
If @param func is a callable then this
function behaves like a decorator, checking
the return type for a omexmeta error. This is used in simpler functions
(of which there are many) that only call a omexmeta method.
If @param func is not callable, then we check
to see whether func is nullptr or < 0, indicative
of a omexmeta error. This is used in more complicated
situations
Args:
func: callable or value.
Returns: a func of @param is callable or the original value if not.
todo split into two functions (voilation of SRP).
- check_for_error_value
- check_for_error_return (for decorator)
"""
if callable(func):
def raise_error_if_necessary(*args, **kwargs):
failed = func(*args, **kwargs)
if failed is None:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
if isinstance(failed, int):
if failed < 0:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
return failed
return raise_error_if_necessary
else:
value = func
if value is None:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
if isinstance(func, int):
if func < 0:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
return func
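# Minimal illustrative sketch (not part of the library): the decorator form of
# propagate_omexmeta_error wraps a call and raises OmexMetaException when the return
# value is None or a negative error code. rdf_size is an existing _pyom call used by
# RDF.__len__ below; _example_checked_size itself is hypothetical.
@propagate_omexmeta_error
def _example_checked_size(rdf_ptr):
    return _pyom.rdf_size(rdf_ptr)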
class RDF:
def __init__(self, storage_type: str = "memory", storage_name: str = "libOmexMetaStore",
storage_options: str = None, model_options: str = None, rdf_ptr: ct.c_int64 = None):
# when pointer argument not given by user, create new instance of RDF
# argument is only given manually when static methods are used and
# this is hidden from users.
if not rdf_ptr:
self._obj = _pyom.rdf_new(
storage_type.encode(), storage_name.encode(),
None if storage_options is None else storage_options.encode(),
None if model_options is None else model_options.encode(),
)
else:
self._obj = rdf_ptr
@propagate_omexmeta_error
def __len__(self):
"""Returns the number of individual Triples stored in the rdf model"""
return _pyom.rdf_size(self._obj)
def __str__(self):
"""Serializes to turtle syntax by default"""
return self.to_string("turtle")
def __del__(self):
"""deletes the RDF instance"""
self.delete()
def _set_rdf_ptr(self, ptr: ct.c_int64):
"""
Change the RDF pointer to ptr
Args:
ptr: a pointer to the C generated RDF object.
Returns:
"""
# first remove the existing pointer
self.delete()
# then do the switch
self._obj = ptr
@staticmethod
def from_string(rdf_string: str, format: str = "guess", base_uri: str = "Annotations.rdf",
storage_type: str = "hashes", storage_name: str = "semsim_storage", storage_options: str = None,
model_options: str = None) -> RDF:
"""read rdf from a string"""
rdf_ptr = _pyom.rdf_from_string(
rdf_string.encode(), format.encode(), base_uri.encode(),
storage_type.encode(), storage_name.encode(),
None if not storage_options else storage_options.encode(),
None if not model_options else model_options.encode()
)
propagate_omexmeta_error(rdf_ptr)
rdf = RDF()
rdf._set_rdf_ptr(rdf_ptr)
return rdf
@propagate_omexmeta_error
def add_from_string(self, rdf_string: str, format: str = "guess", base_uri: str = "Annotations.rdf") -> None:
return _pyom.rdf_add_from_string(self._obj, rdf_string.encode(), format.encode(), base_uri.encode())
@staticmethod
def from_uri(uri_string: str, format: str, storage_type: str = "hashes", storage_name: str = "semsim_storage",
storage_options: str = None,
model_options: str = None) -> RDF:
rdf_ptr = _pyom.rdf_from_uri(
uri_string.encode(), format.encode(),
storage_type.encode(), storage_name.encode(),
None if not storage_options else storage_options.encode(),
None if not model_options else model_options.encode()
)
propagate_omexmeta_error(rdf_ptr)
rdf = RDF()
rdf._set_rdf_ptr(rdf_ptr)
return rdf
@propagate_omexmeta_error
def add_from_uri(self, uri_string: str, format: str) -> None:
return _pyom.rdf_add_from_uri(self._obj, uri_string.encode(), format.encode())
@staticmethod
def from_file(filename: str, format: str, storage_type: str = "hashes", storage_name: str = "semsim_storage",
storage_options: str = None, model_options: str = None) -> RDF:
rdf_ptr = _pyom.rdf_from_file(
filename.encode(), format.encode(),
storage_type.encode(), storage_name.encode(),
None if not storage_options else storage_options.encode(),
None if not model_options else model_options.encode()
)
propagate_omexmeta_error(rdf_ptr)
rdf = RDF()
rdf._set_rdf_ptr(rdf_ptr)
return rdf
@staticmethod
@propagate_omexmeta_error
def equals_rdf_vs_rdf(first_rdf: RDF, second_rdf: RDF, format: str = "turtle") -> bool:
return _pyom.rdf_equals_rdf_vs_rdf(first_rdf._obj, second_rdf._obj, format.encode())
@staticmethod
@propagate_omexmeta_error
def equals_rdf_vs_string(rdf: RDF, string: str, format: str = "turtle") -> bool:
return _pyom.rdf_equals_rdf_vs_string(rdf._obj, string.encode(), format.encode())
@staticmethod
@propagate_omexmeta_error
def equals_string_vs_string(first_string: str, second_string: str, first_format: str = "turtle",
second_format: str = "turtle") -> bool:
return _pyom.rdf_equals_string_vs_string(first_string.encode(), second_string.encode(),
first_format.encode(), second_format.encode())
@propagate_omexmeta_error
def __eq__(self, other: RDF):
return self.equals_rdf_vs_rdf(self, other)
@propagate_omexmeta_error
def add_from_file(self, filename: str, format: str) -> None:
return _pyom.rdf_add_from_file(self._obj, filename.encode(), format.encode())
def delete(self) -> None:
"""destructor. Delete the dynamically allocated rdf object"""
return _pyom.rdf_delete(self._obj)
def to_string(self, format: str = "turtle") -> str:
str_ptr = _pyom.rdf_to_string(self._obj, format.encode())
propagate_omexmeta_error(str_ptr)
thestring = _pyom.get_and_free_c_str(str_ptr)
return thestring
@propagate_omexmeta_error
def to_file(self, format: str, filename: str) -> None:
return _pyom.rdf_to_file(self._obj, format.encode(), filename.encode())
@propagate_omexmeta_error
def set_repository_uri(self, repository_uri: str) -> None:
return _pyom.rdf_set_repository_uri(self._obj, repository_uri.encode())
@propagate_omexmeta_error
def set_archive_uri(self, archive_url: str) -> None:
return _pyom.rdf_set_archive_uri(self._obj, archive_url.encode())
@propagate_omexmeta_error
def set_model_uri(self, model_uri: str) -> None:
return _pyom.rdf_set_model_uri(self._obj, model_uri.encode())
def get_repository_uri(self) -> str:
string_ptr = _pyom.rdf_get_repository_uri(self._obj)
propagate_omexmeta_error(string_ptr)
return _pyom.get_and_free_c_str(string_ptr)
def get_archive_uri(self) -> str:
string_ptr = _pyom.rdf_get_archive_uri(self._obj)
propagate_omexmeta_error(string_ptr)
return _pyom.get_and_free_c_str(string_ptr)
def get_model_uri(self) -> str:
string_ptr = _pyom.rdf_get_model_uri(self._obj)
propagate_omexmeta_error(string_ptr)
return _pyom.get_and_free_c_str(string_ptr)
def get_local_uri(self) -> str:
string_ptr = _pyom.rdf_get_local_uri(self._obj)
propagate_omexmeta_error(string_ptr)
return _pyom.get_and_free_c_str(string_ptr)
def query(self, query_str: str, results_format: str) -> str:
query_results_ptr = _pyom.rdf_query_results_as_str(
self._obj, query_str.encode(), results_format.encode())
propagate_omexmeta_error(query_results_ptr)
results_crlf = _pyom.get_and_free_c_str(query_results_ptr)
results_lf = _pyom.crlf_to_lr(results_crlf)
return results_lf
def to_editor(self, xml: str, generate_new_metaids: bool = False, sbml_semantic_extraction: bool = True) -> Editor:
obj = _pyom.rdf_to_editor(
self._obj, xml.encode(),
generate_new_metaids,
sbml_semantic_extraction,
)
propagate_omexmeta_error(obj)
return Editor(obj)
def draw(self, filename: str, **kwargs):
"""
render a graph of the RDF and save it to `filename`
Args:
filename: where to write. The extension determines the format. See
https://graphviz.readthedocs.io/en/stable/index.html for more
details about accepted formats.
Returns:
"""
try:
import graphviz
except ImportError:
raise ImportError(f'"graphviz" not found. Install '
f'with "sudo apt install graphviz" and then '
f'"pip install graphviz". This may be a conda environment issue. Check that '
f'you are using the correct python interpreter. The interpreter being used '
f'now is \"{_python_interpretor}\"')
dot = self.to_string("dot")
src = graphviz.Source(dot, **kwargs)
src.render(filename)
print('RDF graph saved to "{}"'.format(filename))
if not os.path.isfile(filename):
raise ValueError("Output was not written to file \"{}\"".format(filename))
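# Hedged usage sketch (not part of the library): build an RDF graph from a tiny
# hand-written Turtle string, inspect it, and run a SPARQL query; then obtain an
# Editor for an SBML string. The example triple, the "csv" results format and the
# sbml_string placeholder are illustrative assumptions.
def _example_rdf_usage():
    rdf = RDF.from_string(
        "@prefix ex: <http://example.org/> . ex:subject ex:predicate ex:object .",
        format="turtle")
    n_triples = len(rdf)                      # number of triples in the model
    turtle = rdf.to_string("turtle")          # same serialization __str__ uses
    results = rdf.query("SELECT ?s ?p ?o WHERE { ?s ?p ?o }", results_format="csv")
    return n_triples, turtle, results


def _example_editor_metaids(sbml_string: str):
    # Editor (defined below) supports the context manager protocol; annotations would
    # normally be added inside the with-block.
    rdf = RDF()
    with rdf.to_editor(sbml_string, generate_new_metaids=True) as editor:
        return editor.get_metaids()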
class Editor:
def __init__(self, editor_ptr: ct.c_int64):
self._obj = editor_ptr
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.delete()
@propagate_omexmeta_error
def add_namespace(self, namespace: str, prefix: str) -> None:
return _pyom.editor_add_namespace(self._obj, namespace, prefix)
@propagate_omexmeta_error
def add_singular_annotation(self, singular_annotation: SingularAnnotation) -> None:
val = _pyom.editor_add_single_annotation(self._obj, singular_annotation.get_ptr())
return val
@propagate_omexmeta_error
def add_physical_entity(self, physical_entity: PhysicalEntity) -> None:
err_code = _pyom.editor_add_physical_entity(self._obj, physical_entity.get_ptr())
return err_code
@propagate_omexmeta_error
def add_physical_process(self, physical_process: PhysicalProcess) -> None:
return _pyom.editor_add_physical_process(self._obj, physical_process.get_ptr())
@propagate_omexmeta_error
def add_energy_diff(self, energy_diff: EnergyDiff) -> None:
return _pyom.editor_add_energy_diff(self._obj, energy_diff.get_ptr())
@propagate_omexmeta_error
def add_personal_information(self, personal_information: PersonalInformation) -> None:
return _pyom.editor_add_personal_information(self._obj, personal_information.get_ptr())
@propagate_omexmeta_error
def add_physical_property(self, property: PhysicalProperty) -> None:
return _pyom.editor_add_physical_property(self._obj, property.get_ptr())
@propagate_omexmeta_error
def check_valid_metaid(self, id: str) -> None:
return _pyom.editor_check_valid_metaid(self._obj, id)
def get_metaids(self) -> List[str]:
num_ids = _pyom.editor_get_num_metaids(self._obj)
propagate_omexmeta_error(num_ids)
return [_pyom.get_and_free_c_str(
propagate_omexmeta_error(_pyom.editor_get_metaid(self._obj, id))
) for id in range(num_ids)]
@propagate_omexmeta_error
def remove_single_annotation(self, single_annotaiton_ptr: ct.c_int64) -> None:
return _pyom.editor_remove_single_annotation(self._obj, single_annotaiton_ptr)
@propagate_omexmeta_error
def remove_physical_entity(self, physical_entity_ptr: ct.c_int64) -> None:
return _pyom.editor_remove_physical_entity(self._obj, physical_entity_ptr)
@propagate_omexmeta_error
def remove_physical_process(self, physical_process_ptr: ct.c_int64) -> None:
return _pyom.editor_remove_physical_process(self._obj, physical_process_ptr)
@propagate_omexmeta_error
def remove_energy_diff(self, energy_diff_ptr: ct.c_int64) -> None:
return _pyom.editor_remove_energy_diff(self._obj, energy_diff_ptr)
@propagate_omexmeta_error
def remove_personal_information(self, personal_information_ptr: ct.c_int64) -> None:
return _pyom.editor_remove_personal_information(self._obj, personal_information_ptr)
def get_xml(self) -> str:
return _pyom.get_and_free_c_str(
propagate_omexmeta_error(_pyom.editor_get_xml(self._obj))
)
@contextmanager
def new_singular_annotation(self) -> SingularAnnotation:
obj = _pyom.editor_new_singular_annotation(self._obj)
if obj is None:
raise OmexMetaException(_pyom.get_last_error())
singular_annotation = SingularAnnotation(obj)
try:
yield singular_annotation
finally:
self.add_singular_annotation(singular_annotation)
@contextmanager
def new_personal_information(self) -> SingularAnnotation:
obj = _pyom.editor_new_personal_information(self._obj)
if obj is None:
raise OmexMetaException(_pyom.get_last_error())
information = PersonalInformation(obj)
try:
yield information
finally:
self.add_personal_information(information)
@contextmanager
def new_physical_entity(self) -> PhysicalEntity:
obj = _pyom.editor_new_physical_entity(self._obj)
if obj is None:
raise OmexMetaException(_pyom.get_last_error())
physical_entity = PhysicalEntity(obj)
try:
yield physical_entity
finally:
self.add_physical_entity(physical_entity)
@contextmanager
def new_physical_process(self) -> PhysicalProcess:
obj = _pyom.editor_new_physical_process(self._obj)
if obj is None:
raise OmexMetaException(_pyom.get_last_error())
physical_process = PhysicalProcess(obj)
try:
yield physical_process
finally:
self.add_physical_process(physical_process)
@contextmanager
def new_energy_diff(self) -> EnergyDiff:
obj = _pyom.editor_new_energy_diff(self._obj)
if obj is None:
raise OmexMetaException(_pyom.get_last_error())
energy_diff = EnergyDiff(obj)
try:
yield energy_diff
finally:
self.add_energy_diff(energy_diff)
def new_physical_property(self) -> PhysicalProperty:
obj = _pyom.editor_new_physical_property(self._obj)
if obj is None:
raise OmexMetaException(_pyom.get_last_error())
return PhysicalProperty(obj)
def delete(self):
return _pyom.editor_delete(self._obj)
@propagate_omexmeta_error
def add_creator(self, creator) -> Editor:
self._obj = _pyom.editor_add_creator(self._obj, creator.encode())
propagate_omexmeta_error(self._obj)
return self
def add_curator(self, curator) -> Editor:
self._obj = _pyom.editor_add_curator(self._obj, curator.encode())
propagate_omexmeta_error(self._obj)
return self
def add_taxon(self, taxon) -> Editor:
    # body reconstructed to mirror add_curator above; editor_add_taxon is assumed to be
    # the corresponding _pyom wrapper and has not been verified against the real API
    self._obj = _pyom.editor_add_taxon(self._obj, taxon.encode())
    propagate_omexmeta_error(self._obj)
    return self
import timeboard as tb
from timeboard.interval import Interval
from timeboard.exceptions import OutOfBoundsError, PartialOutOfBoundsError
import datetime
import pandas as pd
import pytest
def tb_10_8_6_hours(workshift_ref='start', worktime_source='duration'):
shifts = tb.Marker(each='D', at=[{'hours': 2}, {'hours': 8}, {'hours': 18}])
daily = tb.Organizer(marker=shifts, structure=[1, 0])
return tb.Timeboard(base_unit_freq='H',
start='01 Oct 2017', end='06 Oct 2017',
layout=daily,
workshift_ref=workshift_ref,
worktime_source=worktime_source)
# workshift day dur end label on_duty
# loc
# 0 2017-10-01 00:00:00 1 1 2 2017-10-01 01:59:59 1.0 True
# 1 2017-10-01 02:00:00 1 1 6 2017-10-01 07:59:59 0.0 False
# 2 2017-10-01 08:00:00 1 1 10 2017-10-01 17:59:59 1.0 True
# 3 2017-10-01 18:00:00 1x2 8 2017-10-02 01:59:59 0.0 False
# 4 2017-10-02 02:00:00 2 2 6 2017-10-02 07:59:59 1.0 True
# 5 2017-10-02 08:00:00 2 2 10 2017-10-02 17:59:59 0.0 False
# 6 2017-10-02 18:00:00 2x3 8 2017-10-03 01:59:59 1.0 True
# 7 2017-10-03 02:00:00 3 3 6 2017-10-03 07:59:59 0.0 False
# 8 2017-10-03 08:00:00 3 3 10 2017-10-03 17:59:59 1.0 True
# 9 2017-10-03 18:00:00 3x4 8 2017-10-04 01:59:59 0.0 False
# 10 2017-10-04 02:00:00 4 4 6 2017-10-04 07:59:59 1.0 True
# 11 2017-10-04 08:00:00 4 4 10 2017-10-04 17:59:59 0.0 False
# 12 2017-10-04 18:00:00 4x5 8 2017-10-05 01:59:59 1.0 True
# 13 2017-10-05 02:00:00 5 5 6 2017-10-05 07:59:59 0.0 False
# 14 2017-10-05 08:00:00 5 5 10 2017-10-05 17:59:59 1.0 True
# 15 2017-10-05 18:00:00 5x6 7 2017-10-06 00:59:59 0.0 False
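def _example_interval_lookup():
    # Hedged sketch (not one of the original tests): build the 10/8/6-hour timeboard
    # defined above and query a compound interval, using only calls exercised by the
    # test classes below.
    clnd = tb_10_8_6_hours()
    ivl = clnd.get_interval(('01 Oct 2017 10:00', '02 Oct 2017 23:00'))
    return len(ivl), ivl.count_periods('D', duty='any')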
class TestIntervalCompoundConstructor:
def test_interval_constructor_compound_with_two_ts(self):
clnd = tb_10_8_6_hours()
ivl = clnd.get_interval(('01 Oct 2017 10:00', '02 Oct 2017 23:00'))
assert ivl.start_time == datetime.datetime(2017, 10, 1, 8, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 3, 1, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 3, 2, 0, 0)
assert ivl._loc == (2,6)
assert len(ivl) == 5
ivlx = clnd(('01 Oct 2017 10:00', '02 Oct 2017 23:00'))
assert ivlx._loc == ivl._loc
def test_interval_constructor_compound_with_two_ts_open_ended(self):
clnd = tb_10_8_6_hours()
ivl = clnd.get_interval(('01 Oct 2017 10:00', '02 Oct 2017 23:00'),
closed='00')
assert ivl.start_time == datetime.datetime(2017, 10, 1, 18, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 2, 17, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 2, 18, 0, 0)
assert ivl._loc == (3, 5)
assert len(ivl) == 3
def test_interval_constructor_compound_with_two_ts_same_ws(self):
clnd = tb_10_8_6_hours()
ivl = clnd.get_interval(('02 Oct 2017 19:15', '03 Oct 2017 01:10'))
assert ivl.start_time == datetime.datetime(2017, 10, 2, 18, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 3, 1, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 3, 2, 0, 0)
assert ivl._loc == (6,6)
assert len(ivl) == 1
ivlx = clnd(('02 Oct 2017 19:15', '03 Oct 2017 01:10'))
assert ivlx._loc == ivl._loc
def test_interval_constructor_compound_with_length(self):
clnd = tb_10_8_6_hours()
ivl = clnd.get_interval('02 Oct 2017 15:00', length=7)
assert ivl.start_time == datetime.datetime(2017, 10, 2, 8, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 4, 17, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 4, 18, 0, 0)
assert ivl._loc == (5,11)
assert len(ivl) == 7
ivlx = clnd('02 Oct 2017 15:00', length=7)
assert ivlx._loc == ivl._loc
def test_interval_constructor_compound_with_period(self):
clnd = tb_10_8_6_hours()
ivl = clnd.get_interval('01 Oct 2017 19:00', period='D')
assert ivl.start_time == datetime.datetime(2017, 10, 1, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 2, 1, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 2, 2, 0, 0)
assert ivl._loc == (0, 3)
assert len(ivl) == 4
ivlx = clnd('01 Oct 2017 03:00', period='D')
assert ivlx._loc == ivl._loc
ivl = clnd.get_interval(pd.Period('01 Oct 2017 19:00', freq='D'))
assert ivl._loc == (0, 3)
def test_interval_constructor_compound_with_period2(self):
clnd = tb_10_8_6_hours()
ivl = clnd.get_interval('02 Oct 2017 01:00', period='D')
# The ws of '02 Oct 2017 01:00' begins on the previous day.
# With workshift_ref='start', it does not belong to the ivl, hence
# the day is not fully covered by the interval and the ivl reference point
# is not in the interval.
assert ivl.start_time == datetime.datetime(2017, 10, 2, 2, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 3, 1, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 3, 2, 0, 0)
assert ivl._loc == (4, 6)
assert len(ivl) == 3
ivlx = clnd('02 Oct 2017 01:00', period='D')
assert ivlx._loc == ivl._loc
def test_interval_constructor_compound_with_period3(self):
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = clnd.get_interval('01 Oct 2017 19:00', period='D')
# The ws starting at 18:00 ends the next day.
# With workshift_ref='end' it does not belong to the ivl, hence
# the day is not fully covered by the interval and the ivl reference point
# is not in the interval.
assert ivl.start_time == datetime.datetime(2017, 10, 1, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 1, 17, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 1, 18, 0, 0)
assert ivl._loc == (0, 2)
assert len(ivl) == 3
ivlx = clnd('01 Oct 2017 19:00', period='D')
assert ivlx._loc == ivl._loc
def test_interval_constructor_compound_with_period4(self):
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = clnd.get_interval('02 Oct 2017 01:00', period='D')
# The ws starting at 18:00 ends the next day.
# With workshift_ref='end' it does not belong to the ivl, hence
# the day is not fully covered by the interval.
assert ivl.start_time == datetime.datetime(2017, 10, 1, 18, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 2, 17, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 2, 18, 0, 0)
assert ivl._loc == (3, 5)
assert len(ivl) == 3
ivlx = clnd('02 Oct 2017 01:00', period='D')
assert ivlx._loc == ivl._loc
def test_interval_constructor_compound_with_period_partial(self):
clnd = tb_10_8_6_hours()
# this period is completely outside the tb because the last workshift's
# ref time is in the previous day Oct 5
with pytest.raises(OutOfBoundsError):
clnd.get_interval('06 Oct 2017 00:15', period='D')
def test_interval_constructor_compound_with_period_partial2(self):
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = clnd.get_interval('06 Oct 2017 00:15', period='D')
assert ivl.start_time == datetime.datetime(2017, 10, 5, 18, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 6, 0, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 6, 1, 0, 0)
assert ivl._loc == (15, 15)
assert len(ivl) == 1
ivl = clnd.get_interval('06 Oct 2017 20:15', period='D')
assert ivl._loc == (15, 15)
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval('06 Oct 2017 00:15', period='D',
clip_period=False)
def test_interval_constructor_compound_with_period_partial3(self):
clnd = tb_10_8_6_hours()
ivl = clnd.get_interval('01 Oct 2017 00:15', period='W')
# 01 Oct 2017 is Sunday, the last day of the week
assert ivl.start_time == datetime.datetime(2017, 10, 1, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 2, 1, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 2, 2, 0, 0)
assert ivl._loc == (0, 3)
assert len(ivl) == 4
ivl = clnd.get_interval('30 Sep 2017 12:00', period='W')
assert ivl._loc == (0, 3)
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval('01 Oct 2017 00:15', period='W',
clip_period=False)
def test_interval_constructor_compound_with_period_partial4(self):
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = clnd.get_interval('01 Oct 2017 00:15', period='W')
# 01 Oct 2017 is Sunday, the last day of the week
assert ivl.start_time == datetime.datetime(2017, 10, 1, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 10, 1, 17, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 10, 1, 18, 0, 0)
assert ivl._loc == (0, 2)
assert len(ivl) == 3
ivl = clnd.get_interval('30 Sep 2017 12:00', period='W')
assert ivl._loc == (0, 2)
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval('01 Oct 2017 00:15', period='W',
clip_period=False)
class TestIntervalCompoundCountPeriodsWithLocsNotStraddling(object):
def test_ivl_compound_count_periods_one_float_right_duty_on(self):
clnd = tb_10_8_6_hours()
ivl = Interval(clnd, (4, 5))
assert ivl.count_periods('D') == 1.0 / 2.0
clnd = tb_10_8_6_hours(workshift_ref='end')
assert clnd._timeline._workshift_ref == 'end'
ivl = Interval(clnd, (4, 5))
assert ivl.count_periods('D') == 1.0
def test_ivl_compound_count_periods_one_float_right_duty_off(self):
clnd = tb_10_8_6_hours()
ivl = Interval(clnd, (7, 8))
assert ivl.count_periods('D', duty='off') == 1.0 / 2.0
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = Interval(clnd, (7, 8))
assert ivl.count_periods('D', duty='off') == 2.0 / 2.0
def test_ivl_compound_count_periods_one_float_right_duty_any(self):
clnd = tb_10_8_6_hours()
ivl = Interval(clnd, (1, 2))
assert ivl.count_periods('D', duty='any') == 2.0 / 4.0
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = Interval(clnd, (1, 2))
assert ivl.count_periods('D', duty='any') == 2.0 / 3.0
def test_ivl_compound_count_periods_one_float_left_duty_on(self):
clnd = tb_10_8_6_hours()
ivl = Interval(clnd, (7, 8))
assert ivl.count_periods('D') == 1.0 / 1.0
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = Interval(clnd, (7, 8))
assert ivl.count_periods('D') == 1.0 / 2.0
def test_ivl_compound_count_periods_one_float_left_duty_off(self):
clnd = tb_10_8_6_hours()
ivl = Interval(clnd, (4, 5))
assert ivl.count_periods('D', duty='off') == 1.0 / 1.0
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = Interval(clnd, (4, 5))
assert ivl.count_periods('D', duty='off') == 1.0 / 2.0
def test_ivl_compound_count_periods_many_float_right_duty_on(self):
clnd = tb_10_8_6_hours()
ivl = Interval(clnd, (4, 11))
assert ivl.count_periods('D') == 2.5
clnd = tb_10_8_6_hours(workshift_ref='end')
ivl = Interval(clnd, (4, 11))
assert ivl.count_periods('D') == 3.0
def test_ivl_compound_count_periods_many_float_left_duty_off(self):