Dataset row schema:

| column | dtype | range / classes |
|---|---|---|
| max_stars_repo_path | string | lengths 4 to 286 |
| max_stars_repo_name | string | lengths 5 to 119 |
| max_stars_count | int64 | 0 to 191k |
| id | string | lengths 1 to 7 |
| content | string | lengths 6 to 1.03M |
| content_cleaned | string | lengths 6 to 1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03 to 1 |
| comments | string | lengths 0 to 556k |
| edu_score | float64 | 0.32 to 5.03 |
| edu_int_score | int64 | 0 to 5 |
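A minimal sketch of filtering rows with this schema, assuming the split has been exported to a local Parquet file; the file name and the pandas-based approach are illustrative assumptions, not part of the dataset itself:

```python
# Sketch only: iterate rows of the schema above from a hypothetical local export.
import pandas as pd

df = pd.read_parquet("code_rows.parquet")  # assumed local export of one split
for _, row in df.iterrows():
    # 'content' holds the raw file text (it may start with <filename>/<reponame>
    # tags), 'content_cleaned' a cleaned copy, 'comments' the extracted comments.
    if row["language_score"] > 0.7 and row["edu_int_score"] >= 2:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"],
              row["max_stars_count"], len(row["content"]))
```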
| fetch_pbd_interaction/src/fetch_pbd_interaction/action.py | fetchrobotics/fetch_pbd | 16 | 6632951 |

<filename>fetch_pbd_interaction/src/fetch_pbd_interaction/action.py
'''The in-program representation of a programmed action.'''
# ######################################################################
# Imports
# ######################################################################
# Core ROS imports come first.
import rospy
# System builtins
import threading
# ROS builtins
from geometry_msgs.msg import Vector3, PoseStamped, Quaternion
from std_msgs.msg import Header, ColorRGBA, String
from visualization_msgs.msg import MarkerArray, Marker
import tf
# Local
from fetch_pbd_interaction.arm_target import ArmTarget
from fetch_pbd_interaction.arm_trajectory import ArmTrajectory
from fetch_pbd_interaction.grasp import Grasp
from fetch_pbd_interaction.msg import ExecutionStatus, OrientationRPY, \
ArmState, Landmark
# ######################################################################
# Module level constants
# ######################################################################
# Marker properties for little arrows drawn between consecutive primitives.
LINK_MARKER_LIFETIME = rospy.Duration()
LINK_SCALE = Vector3(0.01, 0.03, 0.03)
LINK_COLOR = ColorRGBA(0.8, 0.8, 0.8, 0.3) # sort of light gray
# ROS topics, etc.
TOPIC_MARKERS = '/fetch_pbd/visualization_marker_array'
# TODO(sarah): Is this necessary?
BASE_LINK = 'base_link'
# ######################################################################
# Classes
# ######################################################################
class Action:
'''Holds information for one action.'''
# TODO(sarah) : Probably get rid of this. Should the class get passed a
# shared marker publisher from the Session or each instance should have
# its own?
_marker_publisher = None
def __init__(self, robot, tf_listener, im_server, primitive_click_cb,
action_change_cb, action_id=None,
grasp_suggestion_service=None,
grasp_feedback_topic=None,
external_ee_link=None):
'''
Args:
robot (Robot) : interface to lower level robot functionality
tf_listener (TransformListener)
            im_server (InteractiveMarkerServer)
primitive_click_cb (function(int)): The function to call when a
primitive is clicked on (normally in the GUI). The function
                should take the number of the primitive.
            action_change_cb (function): Called whenever the action changes.
            action_id (int, optional): The index of this action.
'''
# Initialize a bunch of state.
self._name = "" # Human-friendly name for this action.
self._im_server = im_server
self._seq = []
self._action_id = action_id
self._robot = robot
self._primitive_click_cb = primitive_click_cb
self._action_change_cb = action_change_cb
self._status = ExecutionStatus.NOT_EXECUTING
self._preempt = False
self._tf_listener = tf_listener
self._primitive_counter = 0
# Markers to connect consecutive primitives together
self._link_markers = {}
self._grasp_suggestion_service = grasp_suggestion_service
self._grasp_feedback_topic = grasp_feedback_topic
self._external_ee_link = external_ee_link
# TODO(sarah): Understand this note better
# NOTE(mbforbes): It appears that this is locking manipulation
# of the internal sequence (self._seq). There have been race
# conditions involving this (e.g. marker_click_cb(...)).
#
        # In general, be aware that other code calling these methods
# with data about this class (like how many primitives it holds)
# is bad because that means the outside code is assuming that it
# knows about state internal to this class, and that information
# may not be true by the time the code here gets executed. This
# is because there are several callbacks that trigger here so
        # we must reason asynchronously.
#
# Unless the information you have (e.g. about the number of
# primitives that exist) was learned while this lock was acquired,
# you cannot assume it is true.
self._lock = threading.Lock()
self._status_publisher = rospy.Publisher('/fetch_pbd/fetch_pbd_status',
String,
queue_size=10)
if Action._marker_publisher is None:
Action._marker_publisher = rospy.Publisher(TOPIC_MARKERS,
MarkerArray,
queue_size=10,
latch=True)
# ##################################################################
# Instance methods: Public (API)
# ##################################################################
def head_busy(self):
'''Returns true if head is busy
Returns:
bool
'''
for primitive in self._seq:
if primitive.head_busy():
return True
return False
def get_action_id(self):
''' Returns action_id
Returns:
int
'''
return self._action_id
def set_action_id(self, action_id):
        ''' Sets action_id
Args:
action_id (int)
'''
self._action_id = action_id
def set_name(self, name):
'''Sets human-readable name for action
Args:
name (string)
'''
self._name = name
def get_name(self):
'''Returns human-readable name for action
Returns
(string)
'''
return self._name
def get_json(self):
'''Return json for this action for saving to db
Returns:
dict
'''
json = {}
json['name'] = self._name
json['primitive_counter'] = self._primitive_counter
json['id'] = self._action_id
json['seq'] = []
for primitive in self._seq:
json['seq'].append(primitive.get_json())
return json
def build_from_json(self, json):
        '''Fills out action using json from db
        Args:
            json (dict): json/dict retrieved from couchdb
'''
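        # Expected structure (it mirrors get_json()): {'name': str,
        # 'primitive_counter': int, 'id': int, 'seq': [{'arm_target': {...}}
        # or {'arm_trajectory': {...}} or {'grasp': {...}}, ...]}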
enabled = True
self._action_id = json['id']
self._name = json['name']
self._primitive_counter = json['primitive_counter']
for primitive in json['seq']:
            if 'arm_target' in primitive:
target = primitive['arm_target']
primitive = ArmTarget(self._robot, self._tf_listener,
self._im_server)
primitive.build_from_json(target)
            elif 'arm_trajectory' in primitive:
target = primitive['arm_trajectory']
primitive = ArmTrajectory(self._robot, self._tf_listener,
self._im_server)
primitive.build_from_json(target)
            elif 'grasp' in primitive:
if self._grasp_suggestion_service == "":
enabled = False
target = primitive['grasp']
primitive = Grasp(self._robot, self._tf_listener,
self._im_server,
self._grasp_suggestion_service,
self._grasp_feedback_topic,
self._external_ee_link)
primitive.build_from_json(target)
self.add_primitive(primitive, False, False)
self.reset_viz()
return enabled
def start_execution(self):
''' Starts execution of action.
This method spawns a new thread.
'''
# This will take long; create a thread.
self._preempt = False
self._status = ExecutionStatus.EXECUTING
thread = threading.Thread(
group=None,
target=self._execute_action,
name="action_execution_thread"
)
thread.start()
def stop_execution(self):
''' Indicate that user wants to preempt action execution '''
self._preempt = True
def end_execution(self):
''' Indicate that execution status can reset to
ExecutionStatus.NOT_EXECUTING
'''
self._status = ExecutionStatus.NOT_EXECUTING
def get_status(self):
'''Return execution status of action
Returns:
ExecutionStatus.EXECUTING|NOT_EXECUTING|...etc
'''
return self._status
def set_status(self, status):
'''Set execution status of action
Args:
status (ExecutionStatus.EXECUTING|NOT_EXECUTING|...etc)
'''
self._status = status
def add_primitive(self, primitive, add_marker=True, add_name=True):
'''Add primitive to action.
Args:
primitive (Primitive)
add_marker (bool)
add_name (bool)
'''
self._lock.acquire()
rospy.loginfo("Adding primitive")
if add_name:
primitive.set_name("primitive_" + str(self._primitive_counter))
self._primitive_counter += 1
primitive.add_marker_callbacks(
self.select_primitive, # marker_click_cb
self.delete_primitive,
self._primitive_pose_change,
self._action_change_cb
)
if primitive.get_ref_type() == ArmState.PREVIOUS_TARGET:
primitive.change_ref_frame(ArmState.PREVIOUS_TARGET, Landmark())
self._seq.append(primitive)
if add_marker:
# self._marker_visibility.append(True)
primitive.show_marker()
self._update_markers()
self._lock.release()
self.update_viz()
else:
# self._marker_visibility.append(False)
primitive.hide_marker()
self._lock.release()
rospy.loginfo("Primitive added")
def update_objects(self):
'''For each primitive, updates the reference frames based on
the locations of objects in the world
'''
self._lock.acquire()
rospy.loginfo("Updating objects")
for primitive in self._seq:
if not primitive.update_ref_frames():
primitive.hide_marker()
else:
primitive.show_marker()
self._update_markers()
self._lock.release()
self._action_change_cb()
def n_primitives(self):
'''Returns the number of primitives in this action.
Returns:
int
'''
return len(self._seq)
def reset_viz(self):
'''Removes all visualization relating to this action.'''
self._lock.acquire()
# Destroy the primitive markers.
for primitive in self._seq:
primitive.hide_marker()
self._im_server.clear()
# Mark the links for destruction.
for i in self._link_markers.keys():
self._link_markers[i].action = Marker.DELETE
# Publish the link destructions.
m_array = MarkerArray()
for i in self._link_markers.keys():
m_array.markers.append(self._link_markers[i])
self._marker_publisher.publish(m_array)
self._link_markers = {}
self._lock.release()
def delete_primitive_marker(self, primitive_number):
'''Delete marker with certain index
Args:
primitive_number (int)
'''
# self._marker_visibility[primitive_number] = False
if self.n_primitives() > 0:
primitive = self._seq[primitive_number]
primitive.hide_marker()
def make_primitive_marker(self, primitive_number):
'''Show marker with certain index
Args:
primitive_number (int)
'''
# self._marker_visibility[primitive_number] = True
primitive = self._seq[primitive_number]
if not primitive.show_marker():
            msg = ('Not showing marker for {}'.format(primitive.get_name()) +
                   ' because no matching object found. Try "record objects"?')
            rospy.logwarn(msg)
            self._status_publisher.publish(msg)
def get_marker_visibility(self):
'''Returns visibility status of primitive markers
Returns:
[bool]
'''
marker_visibility = []
for primitive in self._seq:
marker_visibility += [primitive.marker_visible()]
return marker_visibility
def select_primitive(self, primitive_number, is_selected):
'''Callback for when one of the markers is clicked.
Selects clicked marker and unselects others.
Args:
primitive_number (int)
is_selected(bool): Whether the marker was
selected (True) or de-selected (False).
'''
self._lock.acquire()
for primitive in self._seq:
# If we match the one we've clicked on, select it.
if primitive.get_primitive_number() == primitive_number:
primitive.select(is_selected)
primitive.update_viz()
else:
# Otherwise, deselect it.
if primitive.is_control_visible():
primitive.select(False)
primitive.update_viz()
# If we selected it, really click on it.
if is_selected:
self._primitive_click_cb(primitive_number)
else:
self._primitive_click_cb(-1)
self._lock.release()
self.update_viz()
def initialize_viz(self):
'''Initialize visualization.'''
rospy.loginfo("Initialising viz for: {}".format(self.get_action_id()))
# self._lock.acquire()
# self._marker_visibility = [True] * len(self._seq)
marker_visibility = []
for i in range(len(self._seq)):
primitive = self._seq[i]
# Construct the markers.
marker_visibility.append(primitive.show_marker())
if False in marker_visibility:
rospy.logwarn("Not showing primitive markers because " +
"no objects present")
self._status_publisher.publish(
String("Not showing primitive markers because " +
"no objects present"))
self._update_markers()
# self._lock.release()
self.update_viz()
def delete_last_primitive(self):
'''Deletes the last primitive of the action.'''
if self.n_primitives() > 0:
self.delete_primitive(len(self._seq) - 1)
def is_object_required(self):
'''Returns whether this action has any primitives that are relative
to objects in the world (instead of absolute).
Returns:
bool
'''
is_required = False
self._lock.acquire()
for primitive in self._seq:
is_required = primitive.is_object_required()
if is_required:
break
self._lock.release()
return is_required
def get_ref_frame_names(self):
'''Returns the names of the reference frame objects for all
action primitives.
Returns:
[str]
'''
self._lock.acquire()
ref_frame_names = []
for primitive in self._seq:
ref_frame_names += [primitive.get_ref_frame_name()]
self._lock.release()
return ref_frame_names
def get_primitive_names(self):
'''Returns the names of primitives.
Returns:
[str]
'''
self._lock.acquire()
names = []
for primitive in self._seq:
names += [primitive.get_name()]
self._lock.release()
return names
def get_primitive_positions_orientations(self):
'''Returns the positions and orientations of primitives
Returns:
Point[], OrientationRPY[]
'''
self._lock.acquire()
positions = []
orientations = []
for primitive in self._seq:
pose = primitive.get_relative_pose()
quaternion = (
pose.pose.orientation.x,
pose.pose.orientation.y,
pose.pose.orientation.z,
pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
rpy = OrientationRPY(euler[0], euler[1], euler[2])
positions += [pose.pose.position]
orientations += [rpy]
self._lock.release()
return positions, orientations
def get_primitives_editable(self):
'''Returns list of whether primitive poses are editable
Returns:
[bool]
'''
self._lock.acquire()
editable = []
for primitive in self._seq:
editable += [primitive.pose_editable()]
self._lock.release()
return editable
def update_primitive_pose(self, primitive_number, position, orientation):
'''Update pose of primitive given by primitive_number
Args:
primitive_number (int)
position (Point)
orientation (OrientationRPY)
'''
rospy.loginfo("Updating primitive pose")
frame_id = self.get_ref_frame_names()[primitive_number]
pose_stamped = PoseStamped()
pose_stamped.header.frame_id = frame_id
pose_stamped.pose.position = position
roll = orientation.r
pitch = orientation.p
yaw = orientation.y
quat = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
pose_stamped.pose.orientation = Quaternion(quat[0],
quat[1],
quat[2],
quat[3])
primitive = self._seq[primitive_number]
primitive.set_pose(pose_stamped)
self._primitive_pose_change()
def get_primitives(self):
'''Return list of primitives
Returns:
[Primitive]
'''
# self._lock.acquire()
primitives = self._seq
# self._lock.release()
return primitives
def get_primitive(self, index):
'''Returns primitive of the action based on index.
Args:
index (int): Index (0-based) of primitive to return.
Returns:
Primitive|None: Returns None if no such primitive exists.
'''
# NOTE(mbforbes): For this lock to be meaningful, we have to
# check that the index is valid within it.
self._lock.acquire()
n_primitives = len(self._seq)
if index < 0 or index >= n_primitives:
rospy.logerr("Requested primitive index " + str(index) +
", but only have " + str(n_primitives) +
" primitives.")
requested_primitive = None
else:
requested_primitive = self._seq[index]
self._lock.release()
return requested_primitive
def update_viz(self):
'''Updates the visualization of the action.'''
self._lock.acquire()
self._update_links()
m_array = MarkerArray()
for i in self._link_markers.keys():
m_array.markers.append(self._link_markers[i])
self._marker_publisher.publish(m_array)
self._lock.release()
def clear(self):
'''Clears the action.'''
self.reset_viz()
self._lock.acquire()
self._seq = []
self._link_markers = dict()
self._lock.release()
def decrease_id(self):
'''Decrement the action's id by one'''
self._action_id = self._action_id - 1
def switch_primitive_order(self, old_index, new_index):
'''Change the order of primitives in action
Args:
old_index (int)
new_index (int)
'''
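        # Primitives that are relative to the previous target keep their
        # absolute pose across the reorder: poses are cached before the
        # primitive numbers change and then re-expressed in the frame of the
        # (new) preceding target.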
self._lock.acquire()
primitive = self._seq.pop(old_index)
self._seq.insert(new_index, primitive)
relative_primitives = {}
for i in range(self.n_primitives()):
primitive = self._seq[i]
if primitive.get_ref_type() == ArmState.PREVIOUS_TARGET:
relative_primitives[i] = primitive.get_absolute_pose()
primitive.set_primitive_number(i)
for key in relative_primitives:
primitive = self._seq[key]
if primitive.get_ref_type() == ArmState.PREVIOUS_TARGET:
if key == 0:
primitive.change_ref_frame(ArmState.ROBOT_BASE,
Landmark())
else:
pose = relative_primitives[key]
new_pose = self._tf_listener.transformPose(
primitive.get_ref_frame_name(),
pose)
primitive.set_pose(new_pose)
self._lock.release()
self.update_viz()
for idx, primitive in enumerate(self._seq):
if primitive.is_selected():
self._primitive_click_cb(idx)
self._action_change_cb()
def delete_primitive(self, to_delete):
'''Deletes a primitive from the action.
NOTE(mbforbes): The lock should be acquired before calling this
method.
Args:
to_delete (int): The index of the primitive to delete.
'''
if self.n_primitives() == 0:
rospy.logwarn("No primitives to delete")
return
self._lock.acquire()
# if (to_delete + 1) < self.n_primitives():
self._seq[to_delete].hide_marker()
if self._seq[to_delete].is_selected():
self._primitive_click_cb(-1)
for i in range(to_delete + 1, self.n_primitives()):
self._seq[i].decrease_id()
if self.n_primitives() > (to_delete + 1):
next_primitive = self._seq[to_delete + 1]
if next_primitive.get_ref_type() == ArmState.PREVIOUS_TARGET:
if to_delete == 0:
next_primitive.change_ref_frame(ArmState.ROBOT_BASE,
Landmark())
else:
pose = next_primitive.get_absolute_pose()
new_pose = self._tf_listener.transformPose(
next_primitive.get_ref_frame_name(),
pose)
next_primitive.set_pose(new_pose)
self._seq.pop(to_delete)
# self._marker_visibility.pop(to_delete)
self._lock.release()
self.update_viz()
self._action_change_cb()
def execute_primitive(self, to_execute):
'''Execute specified primitive
Args:
to_execute (int)
'''
self._seq[to_execute].execute()
# ##################################################################
# Static methods: Internal ("private")
# ##################################################################
@staticmethod
def _get_link(primitive0, primitive1, marker_id):
'''Returns a marker representing a link b/w two consecutive
primitives (both must already exist).
Args:
primitive0 (Primitive)
primitive1 (Primitive)
            marker_id (int) : id for link marker between two primitives
Returns:
Marker|None
'''
start = primitive0.get_absolute_marker_position(use_final=True)
end = primitive1.get_absolute_marker_position(use_final=False)
if start == end:
return None
        elif start is not None and end is not None:
return Marker(type=Marker.ARROW,
id=marker_id,
lifetime=LINK_MARKER_LIFETIME,
scale=LINK_SCALE,
header=Header(frame_id=BASE_LINK),
color=LINK_COLOR,
points=[start, end])
else:
return None
# ##################################################################
# Instance methods: Internal ("private")
# ##################################################################
def _primitive_pose_change(self):
'''Update links when primitive pose changes'''
for primitive in self._seq:
primitive.update_viz()
# self._lock.release()
self.update_viz()
def _execute_action(self):
''' Function to replay the demonstrated action.'''
primitive = self.get_primitive(0)
rospy.loginfo("Starting to execute action!")
# Make sure the primitive exists.
if primitive is None:
rospy.logwarn("First primitive does not exist.")
self._status = ExecutionStatus.CONDITION_ERROR
self._status_publisher.publish(
String("First primitive does not exist."))
# Check if the very first precondition is met.
# Not actually implemented right now.
elif not self._check_pre_conditions():
self._status = ExecutionStatus.CONDITION_ERROR
else:
# Check that all parts of the action are reachable
if not self._is_action_reachable():
rospy.logwarn("Problem finding IK solutions.")
self._status = ExecutionStatus.NO_IK
self._status_publisher.publish(
String("Problem finding IK solutions."))
else:
self._loop_through_primitives()
self._robot.reset_arm_movement_history()
# If we haven't been preempted, we now report success.
if self._status == ExecutionStatus.EXECUTING:
self._status = ExecutionStatus.SUCCEEDED
rospy.loginfo("Action execution has succeeded.")
def _check_pre_conditions(self):
'''Loop through primitives and make sure all of their
preconditions are met
Returns:
bool
'''
for i in range(self.n_primitives()):
rospy.loginfo("checking preconditions " + str(i))
primitive = self.get_primitive(i)
# Make sure primitive exists.
if primitive is None:
rospy.logwarn("Primitive " + str(primitive.get_name()) + " does not exist.")
self._status = ExecutionStatus.CONDITION_ERROR
self._status_publisher.publish(
String("Primitive " + str(primitive.get_name()) + " does not exist."))
return False
# Check that preconditions are met (doesn't do anything right now)
else:
success, msg = primitive.check_pre_condition()
if not success:
rospy.logwarn(
"\tPreconditions of primitive " + str(primitive.get_name()) + " are not " +
"satisfied. " + msg)
self._status = ExecutionStatus.CONDITION_ERROR
self._status_publisher.publish(
String("Preconditions of primitive " + str(primitive.get_name()) +
" are not satisfied. " + msg))
return False
return True
def _is_action_reachable(self):
        '''Checks that it is possible to execute the entire action.
        Returns:
            bool
        '''
for i in range(len(self._seq)):
primitive = self.get_primitive(i)
if primitive is None:
rospy.logwarn("Primitive " + str(i) + " does not exist.")
break
else:
if not primitive.is_reachable():
return False
return True
def _loop_through_primitives(self):
'''Goes through the primitives of the current action and moves to
each.
'''
# Go over primitives of the action
for i in range(self.n_primitives()):
rospy.loginfo("Executing primitive " + str(i))
primitive = self.get_primitive(i)
# Make sure primitive exists.
if primitive is None:
rospy.logwarn("Primitive " + str(i) + " does not exist.")
self._status = ExecutionStatus.CONDITION_ERROR
self._status_publisher.publish(
String("Primitive " + str(i) + " does not exist."))
break
# Check that preconditions are met (doesn't do anything right now)
else:
# Try executing.
self._status = ExecutionStatus.EXECUTING
success, msg = primitive.execute()
if not success:
self._status = ExecutionStatus.NO_IK
self._status_publisher.publish(
String(msg))
break
# Finished executing; check that postconditions are met
success, msg = primitive.check_post_condition()
if success:
rospy.loginfo('\tPost-conditions of the action are met.')
else:
rospy.logwarn(
"\tPost-conditions of action primitive " + str(i) +
" are not satisfied. Aborting.")
self._status = ExecutionStatus.CONDITION_ERROR
self._status_publisher.publish(
String("Post-conditions of action primitive " +
str(i) + " are not satisfied. " + msg))
break
# Perhaps the execution was pre-empted by the user. Check
# this before continuing onto the next primitive.
if self._preempt:
rospy.logwarn("\tExecution preempted by user.")
self._status = ExecutionStatus.PREEMPTED
self._status_publisher.publish(
String("Execution preempted by user."))
break
# Primitive completed successfully.
rospy.loginfo("\tPrimitive " + str(i) + " of action is complete.")
def _update_markers(self):
'''Updates the markers after a change.'''
rospy.loginfo("Updating viz markers")
for primitive in self._seq:
primitive.update_viz()
def _update_links(self):
'''Updates the visualized links b/w action primitives.'''
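        # Rebuild one arrow marker per consecutive pair of primitives; any
        # markers left over from a previously longer sequence are re-published
        # with action=DELETE so the visualization drops them.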
current_num_links = len(self._link_markers)
new_num_links = len(self._seq) - 1
self._link_markers = {}
if new_num_links >= 1:
for i in range(new_num_links):
link_marker = Action._get_link(self._seq[i],
self._seq[i + 1],
i)
                if link_marker is not None:
self._link_markers[i] = link_marker
if (current_num_links - new_num_links) > 0:
for i in range(new_num_links, current_num_links):
self._link_markers[i] = Marker(id=i, action=Marker.DELETE)
else:
marker = Marker()
marker.id = 0
self._link_markers[0] = marker
self._link_markers[0].action = Marker.DELETE
| language: en | language_score: 0.721188 | edu_score: 1.961164 | edu_int_score: 2 |
| src/chip8/lib/system.py | slastrina/pyChip8SDL | 0 | 6632952 |

import os
import tkinter as tk
from tkinter import filedialog
from chip8 import rom_path
from chip8.lib.cpu import Cpu
from chip8.lib.display import Display
from chip8.lib.ram import Ram
class System:
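    '''Wires together the CHIP-8 components: RAM, display and CPU.'''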
flags = {
'draw': False,
'running': False
}
def __init__(self):
self.ram = Ram()
self.display = Display(64, 32)
self.cpu = Cpu(self.ram.get_program_address(), self.ram, self.display)
def reset(self):
self.ram.reset()
self.display.reset()
self.cpu.reset()
def load_font(self):
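        '''Load the built-in hexadecimal font (one 5-byte sprite per
        character 0-F) into RAM starting at address 0.'''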
font = [0xF0, 0x90, 0x90, 0x90, 0xF0, # 0
0x20, 0x60, 0x20, 0x20, 0x70, # 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, # 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, # 3
0x90, 0x90, 0xF0, 0x10, 0x10, # 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, # 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, # 6
0xF0, 0x10, 0x20, 0x40, 0x40, # 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, # 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, # 9
0xF0, 0x90, 0xF0, 0x90, 0x90, # A
0xE0, 0x90, 0xE0, 0x90, 0xE0, # B
0xF0, 0x80, 0x80, 0x80, 0xF0, # C
0xE0, 0x90, 0x90, 0x90, 0xE0, # D
0xF0, 0x80, 0xF0, 0x80, 0xF0, # E
0xF0, 0x80, 0xF0, 0x80, 0x80] # F
self.ram.set_block(font, 0)
def load_rom(self, filename=None):
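        '''Load a ROM into program memory.
        If filename is given it is resolved relative to rom_path; otherwise a
        file-open dialog (starting at rom_path) asks the user for a file.
        '''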
if filename:
file_path = os.path.join(rom_path, filename)
else:
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(initialdir=rom_path)
with open(file_path, 'rb') as f:
self.ram.set_block(f.read(), self.ram.get_program_address())
def start(self):
# Consider running in a dedicated thread
self.cpu.running = True
while self.cpu.running:
            self.cpu.tick()
import tkinter as tk
from tkinter import filedialog
from chip8 import rom_path
from chip8.lib.cpu import Cpu
from chip8.lib.display import Display
from chip8.lib.ram import Ram
class System:
flags = {
'draw': False,
'running': False
}
def __init__(self):
self.ram = Ram()
self.display = Display(64, 32)
self.cpu = Cpu(self.ram.get_program_address(), self.ram, self.display)
def reset(self):
self.ram.reset()
self.display.reset()
self.cpu.reset()
def load_font(self):
font = [0xF0, 0x90, 0x90, 0x90, 0xF0, # 0
0x20, 0x60, 0x20, 0x20, 0x70, # 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, # 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, # 3
0x90, 0x90, 0xF0, 0x10, 0x10, # 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, # 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, # 6
0xF0, 0x10, 0x20, 0x40, 0x40, # 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, # 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, # 9
0xF0, 0x90, 0xF0, 0x90, 0x90, # A
0xE0, 0x90, 0xE0, 0x90, 0xE0, # B
0xF0, 0x80, 0x80, 0x80, 0xF0, # C
0xE0, 0x90, 0x90, 0x90, 0xE0, # D
0xF0, 0x80, 0xF0, 0x80, 0xF0, # E
0xF0, 0x80, 0xF0, 0x80, 0x80] # F
self.ram.set_block(font, 0)
def load_rom(self, filename=None):
if filename:
file_path = os.path.join(rom_path, filename)
else:
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(initialdir=rom_path)
with open(file_path, 'rb') as f:
self.ram.set_block(f.read(), self.ram.get_program_address())
def start(self):
# Consider running in a dedicated thread
self.cpu.running = True
while self.cpu.running:
self.cpu.tick() | en | 0.741148 | # 0 # 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # A # B # C # D # E # F # Consider running in a dedicated thread | 2.860827 | 3 |
Demos/light_demo.py | jr-garcia/Engendro3D | 8 | 6632953 | <reponame>jr-garcia/Engendro3D
from math import sin
from random import randint, random
from cycgkit.cgtypes import vec3
from _base._BaseDemo import _Demo_Base, runDemo, tubeMODEL, logLevelsEnum
class Demo(_Demo_Base):
def __init__(self):
super(Demo, self).__init__()
self.texturesToLoad = [['e3dlogo.png', 'logo'], ['./textures/n_deep.png', 'defND', True],
['./textures/n_irr.png', 'defNI', True], ['./textures/nmap_test.png', 'testN', True]]
# TODO: credit textures or replace them
self.bumpymats = []
self.texmats = []
self.spots = []
self.spotAngles = {}
def createLightSphere(self, ltype, pos, color):
nlight = self.scene1.addLight(ltype, pos, vec3(0, 0, 0))
nlight.color = color
nlight.spotIntensity = random() # .1
nlight.spotRange = .9
nlight.attenuation = randint(150, 300)
if ltype == 2:
self.spotAngles[nlight] = (randint(1, 30) - randint(10, 50)), (randint(1, 30) - randint(10, 50))
lmod = self.scene1.addModel('conemodel', nlight.ID + 'sph', pos, [0, 0, 0], 1)
self.spots.append((nlight, lmod))
else:
lmod = self.scene1.addModel('spheremodel', nlight.ID + 'sph', pos, [0, 0, 0], 1)
mat = lmod._materials[0]
mat.emissiveColor = color
mat.isLightAffected = False
def loadModels(self):
engine = self.engine
self.camera.rotateX(40)
self.camera.position = vec3(0, 340, 350)
engine.models.loadSphere("mainspheremodel", 32)
self.sphere1 = self.scene1.addModel('mainspheremodel', 'sphere1', [0, 10, 0], [0, 0, 0], 4, mass=8)
# self.sphere1.physicsBody.isDynamic = True
mats = self.sphere1.getMaterialByIndex(0)
mats.specularPower = 50
mats.useDiffuseTexture = True
mats.useNormalMapTexture = True
mats.normalMapTextureID = 'defND'
mats.textureRepeat = 4
self.bumpymats.append(mats)
self.texmats.append(mats)
engine.models.loadSphere("spheremodel", 12)
engine.models.loadCone("conemodel", 20, 10, radialSegments=20)
engine.models.loadBox("boxmodel", [6], 1)
self.box1 = self.scene1.addModel('boxmodel', 'box1', [0, 90, 0], [0, 90, 0], 5, mass=7)
mt = self.box1._materials[0]
mt.specularPower = 40
mt.useDiffuseTexture = True
mt.useNormalMapTexture = True
mt.normalMapTextureID = 'defNI'
self.bumpymats.append(mt)
self.texmats.append(mt)
engine.models.loadPlane("floorplane", 600, 600, 50)
# engine.models.loadPlane("planemodelback", 600, 300, 10)
engine.models.loadPlane("planemodelWalls", 600, 300, 50)
        # IMPORTANT!: High number of segments (tessellation) is needed for large objects. See:
# https://www.opengl.org/archives/resources/features/KilgardTechniques/oglpitfall/
# 2. Poor Tessellation Hurts Lighting
self.floor = self.scene1.addModel('floorplane', 'floor', [0, 0, 0], [0, 0, 0], 1)
mt = self.floor._materials[0]
mt.specularPower = 50
mt.useDiffuseTexture = True
mt.useNormalMapTexture = True
mt.normalMapTextureID = 'defNI'
mt.textureRepeat = 10
self.bumpymats.append(mt)
self.texmats.append(mt)
self.planer = self.scene1.addModel('planemodelWalls', 'planer', [300, 150, 0], [90, 0, 0], 1)
self.planer.rotateY(-90)
mt = self.planer._materials[0]
mt.useNormalMapTexture = True
mt.normalMapTextureID = 'testN'
mt.textureRepeat = 10
self.bumpymats.append(mt)
self.planel = self.scene1.addModel('planemodelWalls', 'planel', [-300, 150, 0], [90, 0, 0], 1)
self.planel.rotateY(90)
self.planel._materials[0] = mt
self.planef = self.scene1.addModel('planemodelWalls', 'planef', [0, 150, -300], [90, 0, 0], 1)
self.planef.moveUp(self.planer.getSize().y)
self.planef._materials[0] = mt
engine.models.loadModel(tubeMODEL, "tubemodel")
self.tube = self.scene1.addModel('tubemodel', 'tube1', [-150, 0, 0], [0, 0, 0], 9)
self.tube.setAnimation(self.tube.getAnimationsList()[0], True)
self.tube2 = self.scene1.addModel('tubemodel', 'tube2', [150, 0, 0], [0, 0, 0], 9)
self.tube2.setAnimation(self.tube2.getAnimationsList()[1], True)
def addLights(self):
print('Adding Lights')
super(Demo, self).addLights()
self.dlight.enabled = False
self.createLightSphere(2, vec3(-259.0, 120.0, 0.0), vec3(1.0, 0.0, 0.0))
self.createLightSphere(2, vec3(0.0, 270.0, -190.0), vec3(1.0, 1.0, 0.0))
self.createLightSphere(1, vec3(-50.0, 30.0, 290.0), vec3(0.0, 1.0, 0.0))
self.createLightSphere(2, vec3(0.0, 150.0, 0.0), vec3(.50, .0, 1.0))
self.createLightSphere(1, vec3(280.0, 30.0, 10.0), vec3(0.0, .0, 1.0))
def mouseMove(self, ev):
if ev.eventName == 'motion':
if self.window.hasFocus():
r = 1.0 / 10 if self.window.mouseLock else 1
self.camera.rotateY(-ev.xRel * r)
self.camera.rotateX(ev.yRel * r)
def keydown(self, e):
if e.eventName == 'keyUp':
return
keyName = e.keyName
if 'shift' in keyName:
self.window.mouseLock = not self.window.mouseLock
if keyName == 'escape': # ESC
self.close()
if keyName == 'f8':
self.window.backend.debugModeActive = not self.window.backend.debugModeActive
if keyName == 'f4':
self.window.backend.showAsWireframe = not self.window.backend.showAsWireframe
if keyName == 'space':
self.window.setFullScreen(not self.window.isFullScreen())
if keyName.__contains__('ctrl'):
self.dorot = not self.dorot
if keyName == 'f1':
np = [round(d, 3) for d in self.camera.position]
engine = self.engine
engine.log('Camera pos:{0}'.format(str(np)), logLevelsEnum.info)
engine.log('Poligons drawn:{}'.format(self.window.backend.poligonsDrawnThisUpdate), logLevelsEnum.info)
if keyName == 'g':
val = self.window.gamma
print('old gamma:' + str(val))
if val <= 1.8:
self.window.gamma = 2.5
else:
self.window.gamma = 1.7
print('new gamma:' + str(self.window.gamma))
if keyName == 'l':
self.dlight.enabled = not self.dlight.enabled
if keyName == 'n':
for mat in self.bumpymats:
mat.useNormalMapTexture = not mat.useNormalMapTexture
if keyName == 't':
for mat in self.texmats:
mat.useDiffuseTexture = not mat.useDiffuseTexture
def scene1Update(self, ev):
ft = ev[0] + .01
movespeed = ft / 10.0
self.scene1.ambientColor = vec3(.004, .006, .009)
self.scene1.bgColor = vec3(.04, .06, .09)
for s, m in self.spots:
rotVec = vec3(self.spotAngles[s][0] * sin(ev[1] / 1000.0), 0, self.spotAngles[s][1] * sin(ev[1] / 500.0))
s.rotation = rotVec
m.rotation = rotVec
if self.dorot:
self.sphere1.rotateY(-.07 * ft)
if self.window.events.isKeyPressed('w'):
self.camera.moveForward(movespeed)
elif self.window.events.isKeyPressed('s'):
self.camera.moveBackward(movespeed)
if self.window.events.isKeyPressed('a'):
self.camera.moveLeft(movespeed)
elif self.window.events.isKeyPressed('d'):
self.camera.moveRight(movespeed)
if self.window.events.isKeyPressed('up'):
self.camera.moveUp(movespeed)
elif self.window.events.isKeyPressed('down'):
self.camera.moveDown(movespeed)
if __name__ == '__main__':
runDemo(Demo(), 'Light Demo')
Python/C1 - Intro/buggy_fixed.py | mrbinx/mrbinx_python | 0 | 6632954 | """
Task 1
@purpose This program requests for a positive integer, and prints out all primes less than the specified integer.
@author <NAME> 25461257
@since 20140803
@modified 20140806
@complexity O(n^2)
@precondition: The user inputs a positive integer
@postcondition: Primes less than the input are printed out
Changes:
Line 17: original was (n=2), should be == comparison operator
Line 22,24,29: Boolean value True/False should be capitalised for first character, original was true/false
Line 23: n%2==1 should be n%2==0 in order to check for even numbers
Line 26: Instead of k*k<n, should be k<n
Line 20: Added a default flag=True assumption to catch errors about unassigned flag values, since if it's not 2, not even, not 1,
and not divisible by any integer up to sqrt(n), we can say that it's a prime number
Line 37: Added a check to see if integer is a positive number
"""
#import math
def is_prime(n):
"""
@purpose Checks whether the passed number, n is prime, and return True/False
@parameters: n - the number to be checked for primeness
@complexity: O(n)
@precondition: The function is passed a positive integer value
@postcondition: Returns a true/false depending on primeness
"""
k = 3
flag = True
if (n == 2): #if it's 2, it's prime
flag = True
elif (n % 2 == 0 or n == 1): #if even number or 1, then not prime
flag = False
else:
while (k < n):
#while (k <= math.sqrt(n)): alternative, we only have to do trial divison on numbers up to sqrt(n)
if (n % k == 0):
flag = False
break
k += 1
return flag
#MAIN BLOCK
try:
n = int(input('Please enter a positive integer: ')) #request input from user
if n >= 0: #check if integer is positive
for i in range(n): #iterate from 0 to n
if (is_prime(i)): #if i is prime, print i
print(i)
else:
print("The integer inputted is not positive.")
except ValueError:
print("Invalid input.")
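# Illustrative alternative (not part of the original submission): the
# sqrt-bounded trial division mentioned in the commented-out line inside
# is_prime could look like this; it only tightens the loop bound, so the
# results are unchanged while the check drops to O(sqrt(n)).
import math

def is_prime_sqrt(n):
    if n == 2:
        return True
    if n < 2 or n % 2 == 0:
        return False
    k = 3
    while k <= math.sqrt(n):   # test odd divisors up to sqrt(n)
        if n % k == 0:
            return False
        k += 2
    return True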
workspace/module/python-2.7/LxMtx/mtxObjAbs.py | no7hings/Lynxi | 2 | 6632955 | <filename>workspace/module/python-2.7/LxMtx/mtxObjAbs.py
# coding:utf-8
from LxBasic import bscMethods
from LxData import datObjAbs
from LxGraphic import grhObjAbs
from . import mtxCfg
class Abs_MtxBasic(mtxCfg.MtxUtility):
pass
# ******************************************************************************************************************** #
class Abs_MtxObjLoader(grhObjAbs.Abs_GrhObjLoader):
def _initAbsMtxObjLoader(self, *args):
self._initAbsGrhObjLoader(*args)
# **************************************************************************************************************** #
@classmethod
def _obj_loader_cls__set_node_raw_create_(cls, *args):
(
nodeRawDict,
typepathStr,
orig_node_raw_dict,
orig_otport_raw_list_dict,
orig_child_port_raw_list_dict
) = args
_datatypeStr = orig_node_raw_dict[cls.DEF_grh__key_node_datatype]
# property
nodeRawDict[cls.DEF_grh__key_node_typepath] = typepathStr
nodeRawDict[cls.DEF_grh__key_node_datatype] = _datatypeStr
# port
_portRawList = []
_orig_port_raw_list = orig_node_raw_dict[cls.DEF_grh__key_port]
cls._obj_loader_cls__set_ports_create_(_portRawList, _orig_port_raw_list, orig_child_port_raw_list_dict)
_orig_otport_raw_list = orig_otport_raw_list_dict.get(_datatypeStr, [])
cls._obj_loader_cls__set_ports_create_(_portRawList, _orig_otport_raw_list, orig_child_port_raw_list_dict)
nodeRawDict[cls.DEF_grh__key_port] = _portRawList
# **************************************************************************************************************** #
@classmethod
def _obj_loader_cls__set_ports_create_(cls, *args):
portRawList, orig_port_raw_list, orig_child_port_raw_list_dict = args
for orig_port_raw in orig_port_raw_list:
cls._obj_loader_cls__set_port_create_(portRawList, orig_port_raw, orig_child_port_raw_list_dict)
@classmethod
def _obj_loader_cls__set_port_create_(cls, *args):
portRawList, orig_port_raw, orig_child_port_raw_list_dict = args
_portpathStr = orig_port_raw[cls.DEF_grh__key_portpath]
if cls.DEF_grh__key_porttype in orig_port_raw:
_porttypeStr = orig_port_raw[cls.DEF_grh__key_porttype]
else:
_porttypeStr = None
_datatypeStr = orig_port_raw[cls.DEF_grh__key_port_datatype]
_portrawStr = orig_port_raw[cls.DEF_grh__key_portraw]
_assignStr = orig_port_raw[cls.DEF_grh__key_assign]
_childStrList = []
# add parent first
cls._obj_loader_cls__set_port_raw_add_(
portRawList,
portpath=_portpathStr,
porttype=_porttypeStr,
datatype=_datatypeStr,
portraw=_portrawStr,
assign=_assignStr,
children=_childStrList
)
orig_child_port_raw_list = orig_child_port_raw_list_dict.get(_datatypeStr, [])
cls._obj_loader_cls__set_port_children_create_(
portRawList, _childStrList, orig_port_raw, orig_child_port_raw_list
)
@classmethod
def _obj_loader_cls__set_port_children_create_(cls, *args):
portRawList, childStrList, orig_parent_port_raw, orig_child_port_raw_list = args
for _index, _orig_child_port_raw in enumerate(orig_child_port_raw_list):
cls._obj_loader_cls__set_port_child_create_(
portRawList, childStrList, orig_parent_port_raw, _orig_child_port_raw,
_index
)
@classmethod
def _obj_loader_cls__set_port_child_create_(cls, *args):
portRawList, childStrList, origParentPortRaw, origPortRaw, childIndex = args
_parentPortpathStr = origParentPortRaw[cls.DEF_grh__key_portpath]
_parentPorttypeStr = origParentPortRaw[cls.DEF_grh__key_port_datatype]
parentPortrawString = origParentPortRaw[cls.DEF_grh__key_portraw]
parentAssignString = origParentPortRaw[cls.DEF_grh__key_assign]
_formatString = origPortRaw[cls.DEF_grh__key_format]
_portpathStr = _formatString.format(
**{
cls.DEF_grh__key_portpath: _parentPortpathStr
}
)
_datatypeStr = origPortRaw[cls.DEF_grh__key_port_datatype]
if parentPortrawString:
_portrawStr = parentPortrawString.split(u',')[childIndex].rstrip().lstrip()
else:
_portrawStr = origPortRaw[cls.DEF_grh__key_portraw]
if parentAssignString == cls.DEF_grh__keyword__gnport:
_portAssignString = cls.DEF_grh__keyword__gnport_channel
        elif parentAssignString == cls.DEF_grh__keyword__inport:
_portAssignString = cls.DEF_grh__keyword__inport_channel
elif parentAssignString == cls.DEF_grh__keyword__otport:
_portAssignString = cls.DEF_grh__keyword__otport_channel
else:
raise TypeError()
cls._obj_loader_cls__set_port_raw_add_(
portRawList,
portpath=_portpathStr,
porttype=_parentPorttypeStr,
datatype=_parentPorttypeStr,
portraw=_portrawStr,
assign=_portAssignString,
parent=_parentPortpathStr,
children=[]
)
childStrList.append(_portpathStr)
# **************************************************************************************************************** #
@classmethod
def _grh__obj_loader_cls__get_definition_node_raw_(cls, *args):
out_node_raw_dict = cls.CLS_ordered_dict()
cls._obj_loader_cls__set_node_raw_create_(
out_node_raw_dict,
*args
)
return out_node_raw_dict
# ******************************************************************************************************************** #
class Abs_MtxObjQueryBuilder(grhObjAbs.Abs_GrhObjQueryrawCreator):
def _initAbsMtxObjQueryBuilder(self, *args):
self._initAbsGrhObjQueryBuilder(*args)
# **************************************************************************************************************** #
def _queryraw_loader__set_build_(self):
self._nodeRaws = bscMethods.OsJsonFile.read(
self.VAR_grh__node_file
) or {}
self._materialRaws = bscMethods.OsJsonFile.read(
self.VAR_grh__material_file
) or {}
self._geometryRaws = bscMethods.OsJsonFile.read(
self.VAR_grh__geometry_file
) or {}
self._origOtportRaw = bscMethods.OsJsonFile.read(
self.VAR_grh__output_file
) or {}
self._origPortChildRaw = bscMethods.OsJsonFile.read(
self.VAR_grh__port_child_file
) or {}
self._origNodeRaws = self.CLS_ordered_dict()
for i in [
self._nodeRaws, self._materialRaws, self._geometryRaws
]:
self._origNodeRaws.update(i)
# **************************************************************************************************************** #
def _queryraw_loader__get_node_raw_(self, *args):
typepathStr = args[0]
if typepathStr in self._origNodeRaws:
origNodeRaw = self._origNodeRaws[typepathStr]
return self.CLS_grh__obj_query_creator__obj_loader.getDefinitionNodeRaw(
typepathStr, origNodeRaw, self._origOtportRaw, self._origPortChildRaw
)
# **************************************************************************************************************** #
def _queryraw_loader__get_category_exist_(self, *args):
typepathStr = args[0]
return typepathStr in self._origNodeRaws
def _queryraw_loader__get_categories_(self):
return self._origNodeRaws.keys()
# ******************************************************************************************************************** #
class Abs_MtxObjQueue(grhObjAbs.Abs_GrhObjQueue):
def _initAbsMtxObjQueue(self, *args):
self._initAbsGrhObjQueue(*args)
# raw **************************************************************************************************************** #
class Abs_MtxRaw(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatRaw
):
def _initAbsMtxRaw(self, *args):
self._initAbsDatRaw(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxDatatype(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatDatatype
):
def _initAbsMtxDatatype(self, *args):
self._initAbsDatDatatype(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxObjProxyNamespace(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjNamespace
):
def _initAbsMtxObjProxyNamespace(self, *args):
self._initAbsDatObjNamespace(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxName(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatName
):
def _initAbsMtxName(self, *args):
self._initAbsDatName(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxObjTypename(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjName,
):
def _initAbsMtxObjTypename(self, *args):
self._initAbsDatObjName(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxObjName(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjName,
):
def _initAbsMtxObjName(self, *args):
self._initAbsDatObjName(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxPath(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjPath
):
def _initAbsMtxPath(self, *args):
self._initAbsDatObjPath(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxAttrpath(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjComppath
):
def _initAbsMtxAttrpath(self, *args):
self._initAbsDatObjComppath(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# object set ********************************************************************************************************* #
class Abs_MtxObjSet(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhObjStack
):
def _initAbsMtxObjSet(self, *args):
self._initAbsGrhObjStack(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# value ************************************************************************************************************** #
class Abs_MtxValue(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatValue
):
def _initAbsMtxValue(self, *args):
self._initAbsDatValue(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
self.datatype(), self.data()
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# ******************************************************************************************************************** #
class Abs_MtxPort(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhPort
):
def _initAbsMtxPort(self, *args, **kwargs):
self._initAbsGrhPort(*args, **kwargs)
self._initAbsDatXmlObj()
self._proxyObj = None
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_attach_value_str_(self):
return self.portpathString()
def _xml_obj__get_attribute_attach_list_(self):
if self.isChannel() is True:
# <... nodename="nodepath" member="parent portpath" channel="portname" />
return [
self.parent(),
(self._xml_obj__get_attribute_attach_key_str_(), self.portnameString())
]
else:
# <... nodename = "nodepath" member = "portpath" />
return [
self.node(),
(self._xml_obj__get_attribute_attach_key_str_(), self.portpathString())
]
def _xml_obj__get_attribute_list_(self):
return [
self.portpath(),
self.datatype(),
self.portgiven()
]
class Abs_MtxNode(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhNode
):
def _initAbsMtxNode(self, *args, **kwargs):
self._initAbsGrhNode(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_element_prefix_str(self):
return self.typepathString()
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.datatype()
]
def _xml_obj__get_child_element_list_(self):
return self.changedInport()
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxConnector(
grhObjAbs.Abs_GrhConnector
):
def _initAbsMtxConnector(self, *args):
self._initAbsGrhConnector(*args)
# port proxy ********************************************************************************************************* #
class Abs_MtxPortProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhPortProxy,
):
def _initAbsMtxPortProxy(self, *args, **kwargs):
self._initAbsGrhPortProxy(*args, **kwargs)
self._initAbsDatXmlObj()
def _xml_obj__get_attribute_list_(self):
return [
self.bindObject().portpath(),
self.bindObject().datatype(),
self.bindPortgiven()
]
class Abs_MtxShaderProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhShaderProxy
):
def _initAbsMtxShaderProxy(self, *args, **kwargs):
self._initAbsGrhShaderProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.bindObject().typepath(),
[(u'context', self._shader_proxy__get_material_context_())]
]
def _xml_obj__get_child_element_list_(self):
return self.changedBindInportProxies()
class Abs_MtxMaterialProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhMaterialProxy
):
def _initAbsMtxMaterialProxy(self, *args, **kwargs):
self._initAbsGrhMaterialProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self.path()
]
def _xml_obj__get_child_element_list_(self):
# update shader's node graph first
for shaderProxyObj in self.shaders():
nodeGraphObj = shaderProxyObj.inputNodeGraph()
nodeGraphObj._node_graph__set_bind_obj_update_()
return self.shaders()
def _xml_obj__get_sibling_element_list_(self):
lis = []
# node graph
for shaderProxyObj in self.shaders():
nodeGraphObjs = shaderProxyObj.inputNodeGraphs()
if nodeGraphObjs:
for nodeGraphObj in nodeGraphObjs:
if nodeGraphObj.hasBindNodes():
if not nodeGraphObj in lis:
lis.append(nodeGraphObj)
return lis
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxGeometryProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhGeometryProxy
):
def _initAbsMtxGeometryProxy(self, *args, **kwargs):
self._initAbsGrhGeometryProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.bindObject().typepath()
]
def _xml_obj__get_child_element_list_(self):
return self.changedProperties() + self.changedVisibilities()
# node graph ********************************************************************************************************* #
class Abs_MtxNodeGraph(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhNodeGraph
):
def _initAbsMtxNodeGraph(self, *args, **kwargs):
self._initAbsGrhNodeGraph(*args, **kwargs)
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
self.path()
]
def _xml_obj__get_child_element_list_(self):
return self.bindNodes() + self.bindOtportProxies()
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxNodeGraphOtportProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhNodeGraphPortProxy,
):
def _initAbsMtxNodeGraphOtportProxy(self, *args, **kwargs):
self._initAbsGrhNodeGraphPortProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.bindObject().datatype(),
self.bindObject()
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
self.bindNodeGraph(),
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# portset ************************************************************************************************************ #
class Abs_MtxPortset(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__name = None
CLS_grh__node__port_stack = None
def _initAbsMtxPortset(self, *args):
self._nameObj = self.CLS_mtx__name(*args)
self._portStackObj = self.CLS_grh__node__port_stack()
self._initAbsDatXmlObj()
def restore(self):
self._portStackObj.restore()
def name(self):
return self._nameObj
def nameString(self):
"""
:return: str
"""
return self._nameObj.raw()
def setNameString(self, nameString):
"""
:param nameString: str
:return: None
"""
self._nameObj.setRaw(nameString)
def addPort(self, portObject):
self._portStackObj.addObject(portObject)
def addPorts(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addPort(i) for i in _]
def ports(self):
return self._portStackObj.objects()
def hasPorts(self):
return self._portStackObj.hasObjects()
def _xml_obj__get_attribute_attach_value_str_(self):
return self.name()._xml_obj__get_attribute_attach_value_str_()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
def _xml_obj__get_attribute_list_(self):
return [
self.name()
]
def _xml_obj__get_child_element_list_(self):
return self.ports()
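    # Usage sketch (illustrative only): a concrete subclass is expected to bind
    # CLS_mtx__name and CLS_grh__node__port_stack first; "MtxPortset" and the
    # port objects below are hypothetical names.
    #     portsetObj = MtxPortset(u'default_portset')
    #     portsetObj.addPorts(portObj0, portObj1)
    #     if portsetObj.hasPorts():
    #         print(portsetObj.ports())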
# geometry collection
class Abs_MtxCollection(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__name = None
CLS_mtx__look__geometry_proxy_stack = None
CLS_mtx__collection_set = None
DEF_geometry_separator = None
def _initAbsMtxCollection(self, *args):
self._nameObj = self.CLS_mtx__name(*args)
self._geometryProxyStackObj = self.CLS_mtx__look__geometry_proxy_stack()
self._collectionStackObj = self.CLS_mtx__collection_set()
self._excludeGeometryStackObj = self.CLS_mtx__look__geometry_proxy_stack()
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def nameString(self):
"""
:return: str
"""
return self._nameObj.toString()
def setNameString(self, nameString):
"""
:param nameString: str
:return: None
"""
self._nameObj.setRaw(nameString)
def addGeometry(self, geometryProxyObj):
"""
:param geometryProxyObj: object of Geometry
:return:
"""
self._geometryProxyStackObj.addObject(geometryProxyObj)
def addGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addGeometry(i) for i in list(_)]
def geometries(self):
"""
        :return: list(object of Geometry, ...)
"""
return self._geometryProxyStackObj.objects()
def hasGeometries(self):
"""
:return: bool
"""
return self._geometryProxyStackObj.hasObjects()
def geometryNameStrings(self):
"""
:return: list(str, ...)
"""
return [i.bindPathString() for i in self.geometries()]
def geometryPathStrings(self):
"""
:return: list(str, ...)
"""
return [i.bindPathString() for i in self.geometries()]
def excludeGeometrySet(self):
return self._excludeGeometryStackObj
def addExcludeGeometry(self, geometryProxyObj):
self._excludeGeometryStackObj.addObject(geometryProxyObj)
def addExcludeGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addExcludeGeometry(i) for i in list(_)]
def excludeGeometries(self):
return self._excludeGeometryStackObj.objects()
def collectionSet(self):
return self._collectionStackObj
def addCollection(self, collectionObject):
"""
:param collectionObject: object of Collection
:return: None
"""
self._collectionStackObj.addObject(collectionObject)
def hasCollections(self):
"""
:return: bool
"""
return self._collectionStackObj.hasObjects()
def collections(self):
"""
:return: list(object of Collection, ...)
"""
return self._collectionStackObj.objects()
def collectionNames(self):
"""
:return: list(str, ...)
"""
return [i.nameString() for i in self.collections()]
def toString(self):
return self.nameString()
def _xml_obj__get_attribute_list_(self):
return [
self._nameObj,
self._geometryProxyStackObj,
self.collectionSet(),
self.excludeGeometrySet()
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.nameString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# assign ************************************************************************************************************* #
class Abs_MtxAssign(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__name = None
CLS_mtx__look__geometry_proxy_stack = None
DEF_geometry_separator = None
def _initAbsMtxAssign(self, *args):
lookArg, nameArg = args
self._lookObj = lookArg
self._nameObj = self.CLS_mtx__name(nameArg)
self._geometryProxyStackObj = self.CLS_mtx__look__geometry_proxy_stack(
self.nameString()
)
self._collectionObj = None
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def name(self):
return self._nameObj
def nameString(self):
"""
:return: str
"""
return self._nameObj.raw()
def setNameString(self, nameString):
"""
:param nameString: str
:return: None
"""
self._nameObj._raw__set_create_by_str_(nameString)
# **************************************************************************************************************** #
def look(self):
return self._lookObj
# **************************************************************************************************************** #
def _assign__set_geometry_proxy_add_(self, *args):
geometryProxyObj = args[0]
self._geometryProxyStackObj.addObject(geometryProxyObj)
def hasGeometry(self, *args):
return self._geometryProxyStackObj._obj_stack__get_obj_exist_(*args)
def addGeometry(self, geometryProxyObj):
"""
:param geometryProxyObj: object of Geometry
:return: None
"""
self._assign__set_geometry_proxy_add_(geometryProxyObj)
def addGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addGeometry(i) for i in list(_)]
def geometries(self):
"""
        :return: list(object of Geometry, ...)
"""
return self._geometryProxyStackObj.objects()
def hasGeometries(self):
"""
:return: bool
"""
return self._geometryProxyStackObj.hasObjects()
def geometryNameStrings(self):
"""
:return: list(str, ...)
"""
return [i.nameString() for i in self.geometries()]
def geometryPathStrings(self):
"""
:return: list(str, ...)
"""
return [i.bindPathString() for i in self.geometries()]
# **************************************************************************************************************** #
def setCollection(self, collectionObject):
"""
:param collectionObject: object of Collection
:return: None
"""
self._collectionObj = collectionObject
def collection(self):
"""
:return: object of Collection
"""
return self._collectionObj
def _xmlElementAttaches_(self):
pass
class Abs_MtxMaterialAssign(Abs_MtxAssign):
def _initAbsMtxMaterialAssign(self, *args):
self._initAbsMtxAssign(*args)
self._materialProxyObj = None
def setMaterial(self, tgtMaterialObj):
"""
:param tgtMaterialObj: object of MaterialProxy
:return:
"""
self._materialProxyObj = tgtMaterialObj
def material(self):
"""
:return: object of ShaderSet
"""
return self._materialProxyObj
def _xmlElementAttaches_(self):
return [
self._materialProxyObj,
self._collectionObj
]
def _xml_obj__get_attribute_attach_value_str_(self):
        return self.nameString()
def _xml_obj__get_attribute_list_(self):
return [
self.name(),
self.material(),
self._geometryProxyStackObj,
self.collection()
]
class Abs_MtxPropertyAssign(Abs_MtxAssign):
def _initAbsMtxPropertyAssign(self, *args):
pass
class Abs_MtxPropertysetAssign(Abs_MtxAssign):
CLS_mtx__propertyset = None
def _initAbsMtxPropertysetAssign(self, *args):
self._initAbsMtxAssign(*args)
self._propertysetObj = None
def _setPropertyset_(self, *args):
if isinstance(args[0], (str, unicode)):
propertysetObject = self.CLS_mtx__propertyset(args[0])
else:
propertysetObject = args[0]
self._propertysetObj = propertysetObject
return self._propertysetObj
def setPropertyset(self, *args):
"""
:param args:
1.str
2.instance of "Propertyset"
:return: instance of "Propertyset"
"""
return self._setPropertyset_(*args)
def hasPropertyset(self):
return self._propertysetObj is not None
def propertyset(self):
"""
:return: object of Propertyset
"""
return self._propertysetObj
def _xmlElementAttaches_(self):
return [
self._propertysetObj,
self._collectionObj
]
def _xml_obj__get_attribute_list_(self):
return [
self.name(),
self.propertyset(),
self._geometryProxyStackObj,
self.collection()
]
class Abs_MtxVisibilityAssign(Abs_MtxAssign):
CLS_grh__type = None
CLS_mtx__value_visibility = None
CLS_mtx__geometry_viewer_set = None
def _initAbsMtxVisibilityAssign(self, *args):
self._initAbsMtxAssign(*args)
self._vistypeObj = None
self._visibilityValueObj = None
self._viewerGeometryStackObj = self.CLS_mtx__geometry_viewer_set()
def type(self):
return self._vistypeObj
def typeString(self):
return self._vistypeObj.toString()
def visible(self):
return self._visibilityValueObj
def assignVisibility(self, portObj):
visibilityString = portObj.portpathString()
self._vistypeObj = self.CLS_grh__type(visibilityString)
self._visibilityValueObj = portObj.value()
def addViewerGeometry(self, geometryProxyObj):
self._viewerGeometryStackObj.addObject(geometryProxyObj)
def viewerGeometries(self):
return self._viewerGeometryStackObj.objsets()
# xml ************************************************************************************************************ #
def _xmlElementAttaches_(self):
return [
self._collectionObj
]
def _xml_obj__get_attribute_list_(self):
return [
self.name(),
self.type(),
self.visible(),
self._geometryProxyStackObj,
self._viewerGeometryStackObj,
self.collection()
]
# ******************************************************************************************************************** #
class Abs_MtxLook(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__look__name = None
CLS_mtx__look__namespace = None
CLS_mtx__look__assign_stack = None
CLS_mtx__look__material_assign = None
CLS_mtx__look__material_assign_stack = None
CLS_mtx__look__propertyset_assign = None
CLS_mtx__look__propertyset_assign_stack = None
CLS_mtx__look__visibility_assign = None
CLS_mtx__look__visibility_assign_stack = None
CLS_mtx__look__geometry_proxy_stack = None
def _initAbsMtxLook(self, *args):
fileArg, nameArg = args
self._fileObj = fileArg
self._nameObj = self.CLS_mtx__look__name(nameArg)
self._visibilityAssignStackObj = self.CLS_mtx__look__visibility_assign_stack(nameArg)
self._materialAssignStackObj = self.CLS_mtx__look__material_assign_stack(nameArg)
self._propertysetAssignStackObj = self.CLS_mtx__look__propertyset_assign_stack(nameArg)
self._geometryProxyStackObj = self.CLS_mtx__look__geometry_proxy_stack(nameArg)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _look__set_assigns_create_(self):
for i in self._geometryProxyStackObj.objects():
self._look__set_material_assigns_create_(i)
self._look__set_propertyset_assigns_create_(i)
self._look__set_visibility_assigns_create_(i)
def _look__set_material_assigns_create_(self, geometryProxyObj):
def addFnc_(geometryProxyObj_, materialProxyObj_):
_materialNodeObj = materialProxyObj_.bindObject()
_count = self._materialAssignStackObj.objectsCount()
_keyString = _materialNodeObj.pathString()
if self._materialAssignStackObj._obj_stack__get_obj_exist_(_keyString):
_materialAssignObj = self._materialAssignStackObj._obj_stack__get_obj_(_keyString)
else:
_materialAssignObj = self.CLS_mtx__look__material_assign(
self, u'material_assign_{}'.format(_count)
)
_materialAssignObj.setMaterial(materialProxyObj_)
self._materialAssignStackObj._obj_stack__set_obj_add_(_keyString, _materialAssignObj)
if _materialAssignObj.hasGeometry(geometryProxyObj_) is False:
_materialAssignObj.addGeometry(geometryProxyObj_)
#
# namespaceStr = self.nameString()
# materialProxyObj = geometryProxyObj.inputNodeProxy(namespaceStr)
# if materialProxyObj is not None:
# addFnc_(geometryProxyObj, materialProxyObj)
materialProxyObjList = geometryProxyObj.assignmentMaterialProxies()
for materialProxyObj in materialProxyObjList:
addFnc_(geometryProxyObj, materialProxyObj)
def _look__set_propertyset_assigns_create_(self, geometryProxyObj):
def addFnc_(geometryProxyObj_, propertysetObj_):
_count = self._propertysetAssignStackObj.objectsCount()
_keyString = geometryProxyObj_.bindPathString()
if self._propertysetAssignStackObj._obj_stack__get_obj_exist_(_keyString):
_propertysetAssignObj = self._propertysetAssignStackObj._obj_stack__get_obj_(_keyString)
else:
_propertysetAssignObj = self.CLS_mtx__look__propertyset_assign(
self, propertysetObj_.nameString()
)
# _materialAssignObj = self.CLS_mtx__look__material_assign(
# self, u'material_assign_{}'.format(_count)
# )
self._propertysetAssignStackObj._obj_stack__set_obj_add_(_keyString, _propertysetAssignObj)
_propertysetAssignObj.setPropertyset(propertysetObj_)
if _propertysetAssignObj.hasGeometry(geometryProxyObj_) is False:
_propertysetAssignObj.addGeometry(geometryProxyObj_)
bindPortsetNamespaceStr = geometryProxyObj.bindPortsetNamespaceString()
propertysetObj = geometryProxyObj._geometry_proxy__set_propertyset_update_(bindPortsetNamespaceStr)
if propertysetObj.hasPorts():
addFnc_(geometryProxyObj, propertysetObj)
def _look__set_visibility_assigns_create_(self, geometryProxyObj):
def addFnc_(geometryProxyObj_, portProxyObj_):
_portObject = portProxyObj_.bindObject()
_count = self._visibilityAssignStackObj.objectsCount()
_keyString = _portObject.portpathString()
if self._visibilityAssignStackObj._obj_stack__get_obj_exist_(_keyString):
_visibilityObject = self._visibilityAssignStackObj._obj_stack__get_obj_(_keyString)
else:
_visibilityObject = self.CLS_mtx__look__visibility_assign(
self, u'visibility_assign_{}'.format(_count)
)
_visibilityObject.assignVisibility(_portObject)
self._visibilityAssignStackObj._obj_stack__set_obj_add_(_keyString, _visibilityObject)
if _visibilityObject.hasGeometry(geometryProxyObj_) is False:
_visibilityObject.addGeometry(geometryProxyObj_)
geometryVisibilities = geometryProxyObj.changedVisibilities()
if geometryVisibilities:
[addFnc_(geometryProxyObj, i) for i in geometryVisibilities]
# **************************************************************************************************************** #
def _look__get_geometry_namespace_str_(self):
return self.nameString()
def geometryNamespaceString(self):
return self._look__get_geometry_namespace_str_()
# **************************************************************************************************************** #
def file(self):
return self._fileObj
# **************************************************************************************************************** #
def name(self):
return self._nameObj
def nameString(self):
return self._nameObj.toString()
# **************************************************************************************************************** #
def geometries(self):
return self._geometryProxyStackObj.objects()
def hasGeometries(self):
return self._geometryProxyStackObj.hasObjects()
def _look__set_geometry_proxy_add_(self, *args):
geometryProxyObj = args[0]
if geometryProxyObj.namespace().isRoot() is True:
geometryNamespaceStr = self.geometryNamespaceString()
geometryProxyObj.setNamespaceString(geometryNamespaceStr)
# add Variant
# geometryObj = geometryProxyObj.bindObject()
# geometryObj.addVariantObject(self.nameString())
# add geometry
self._geometryProxyStackObj.addObject(geometryProxyObj)
def addGeometry(self, geometryProxyObj):
self._look__set_geometry_proxy_add_(geometryProxyObj)
def addGeometries(self, *args):
if isinstance(args[0], (tuple, list)):
[self.addGeometry(i) for i in list(args[0])]
else:
[self.addGeometry(i) for i in list(args)]
def geometry(self, geometryString):
return self._geometryProxyStackObj.object(geometryString)
def hasGeometry(self, *args):
return self._geometryProxyStackObj._obj_stack__get_obj_exist_(*args)
# **************************************************************************************************************** #
def materialAssigns(self):
return self._materialAssignStackObj.objects()
def propertysetAssigns(self):
return self._propertysetAssignStackObj.objects()
def visibilityAssigns(self):
return self._visibilityAssignStackObj.objects()
# **************************************************************************************************************** #
def hasAssigns(self):
return self.assigns() != []
def assigns(self):
return self.materialAssigns() + self.propertysetAssigns() + self.visibilityAssigns()
def _xmlElementAttaches_(self):
lis = []
for assignObject in self.assigns():
for xmlObject in assignObject._xmlElementAttaches_():
if xmlObject is not None:
if xmlObject not in lis:
lis.append(xmlObject)
return lis
def _xml_obj__get_attribute_list_(self):
return [
self._nameObj
]
def _xml_obj__get_child_element_list_(self):
self._look__set_assigns_create_()
return self.assigns()
def _xml_obj__get_sibling_element_list_(self):
return self._xmlElementAttaches_()
class Abs_MtxFile(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__file__path = None
CLS_mtx__file__version = None
CLS_mtx__file__reference_stack = None
CLS_mtx__file__reference = None
CLS_mtx__file__look_stack = None
CLS_mtx__file__look = None
VAR_mtx__file__version = None
def __init__(self, *args, **kwargs):
pass
def _initAbsMtxFile(self, *args):
self._filepathObj = self.CLS_mtx__file__path(*args)
self._versionObj = self.CLS_mtx__file__version(self.VAR_mtx__file__version)
self._referenceStackObj = self.CLS_mtx__file__reference_stack()
self._lookStackObj = self.CLS_mtx__file__look_stack(self)
self._initAbsDatXmlObj()
def _file__set_look_add_(self, *args):
if args:
_ = args[0]
if isinstance(_, (str, unicode)):
lookStr = _
lookObject = self.CLS_mtx__file__look(self, lookStr)
elif isinstance(_, self.CLS_mtx__file__look):
lookObject = _
else:
raise TypeError
else:
lookObject = self.CLS_mtx__file__look(self, u'default_look')
self._lookStackObj.addObject(lookObject)
return lookObject
def _file__set_reference_add_(self, *args):
if self.CLS_mtx__file__reference is not None:
referenceCls = self.CLS_mtx__file__reference
else:
referenceCls = self.__class__
if isinstance(args[0], (str, unicode)):
fileObj = referenceCls(args[0])
elif isinstance(args[0], referenceCls):
fileObj = args[0]
else:
fileObj = referenceCls(u'default')
keyString = fileObj.fullpathFilename()
self._referenceStackObj._obj_stack__set_obj_add_(keyString, fileObj)
def filepath(self):
return self._filepathObj
def fullpathFilename(self):
return self._filepathObj.toString()
def version(self):
return self._versionObj
def versionString(self):
return self._versionObj.toString()
def addReference(self, fileObject):
self._file__set_reference_add_(fileObject)
def references(self):
return self._referenceStackObj.objects()
def reference(self, fileString):
return self._referenceStackObj.object(fileString)
def hasLook(self, lookStr):
return self._lookStackObj._obj_stack__get_obj_exist_(lookStr)
def addLook(self, *args):
"""
:param args:
1.str
2.instance of "Look"
:return:
"""
return self._file__set_look_add_(*args)
def looks(self):
return self._lookStackObj.objects()
def look(self, lookStr):
return self._lookStackObj.object(lookStr)
def lookIndex(self, *args):
return self._lookStackObj._obj_stack__get_obj_index_(*args)
def save(self):
xmlDoc = self.__str__()
bscMethods.OsFile.write(
self.fullpathFilename(), xmlDoc
)
def _xml_obj__get_attribute_list_(self):
return [
self.version()
]
def _xml_obj__get_child_element_list_(self):
return self.references() + self.looks()
class Abs_MtxReference(Abs_MtxFile):
def _initAbsMtxReference(self, *args):
self._initAbsMtxFile(*args)
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self._filepathObj
]
# ******************************************************************************************************************** #
class Abs_MtxTrsLook(Abs_MtxBasic):
CLS_mtx__trs_look__tgt_look = None
CLS_mtx__trs_look__trs_geometry_proxy = None
def _initAbsMtxTrsLook(self, *args):
trsFileArg, tgtLookArg = args
self._trsFileObj = trsFileArg
tgtFileObj = trsFileArg.tgtFile()
self._tgtLookObj = self.CLS_mtx__trs_look__tgt_look(tgtFileObj, tgtLookArg)
def trsFile(self):
return self._trsFileObj
def tgtLook(self):
return self._tgtLookObj
def addSrcGeometry(self, srcNodepathStr):
# geometry namespace = look name
namespaceStr = self.tgtLook().nameString()
trsGeometryProxyObj = self.CLS_mtx__trs_look__trs_geometry_proxy(
srcNodepathStr,
namespace=namespaceStr
)
# target
tgtGeometryProxyObj = trsGeometryProxyObj.tgtNodeProxy()
if self.tgtLook().hasGeometry(tgtGeometryProxyObj) is False:
self.tgtLook().addGeometry(tgtGeometryProxyObj)
else:
bscMethods.PyMessage.traceWarning(
u'''Geometry "{}" already exists.'''.format(tgtGeometryProxyObj.pathString())
)
def addSrcGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addSrcGeometry(i) for i in _]
def _mtx__trs_look__set_material_assign_add_(self, *args):
pass
def addAssign(self, *args):
self._mtx__trs_look__set_material_assign_add_(*args)
def __str__(self):
return self._tgtLookObj.__str__()
# ******************************************************************************************************************** #
class Abs_MtxTrsFile(Abs_MtxBasic):
CLS_mtx__trs_file__tgt_file = None
CLS_mtx__trs_file__trs_look = None
IST_mtx__trs_file__trs_obj_queue = None
def _initAbsMtxTrsFile(self, *args):
fileString = args[0]
self._tgtFileObj = self.CLS_mtx__trs_file__tgt_file(fileString)
self._tgtFileObj.addReference(
u'materialx/arnold/nodedefs.mtlx'
)
def tgtFile(self):
return self._tgtFileObj
def addLook(self, lookStr):
trsLookObj = self.CLS_mtx__trs_file__trs_look(self, lookStr)
if self._tgtFileObj.hasLook(lookStr) is False:
tgtLookObk = trsLookObj.tgtLook()
self._tgtFileObj.addLook(tgtLookObk)
else:
bscMethods.PyMessage.traceWarning(
u'''Look "{}" already exists.'''.format(lookStr)
)
return trsLookObj
def tgtLook(self, lookStr):
return self._tgtFileObj.look(lookStr)
def tgtLooks(self):
return self._tgtFileObj.looks()
def save(self):
for i in self.IST_mtx__trs_file__trs_obj_queue.nodes():
i._grh__trs_node__set_after_expressions_run_()
self._tgtFileObj.save()
bscMethods.PyMessage.traceResult(
u'save file "{}"'.format(
self._tgtFileObj.fullpathFilename()
)
)
def __str__(self):
for i in self.IST_mtx__trs_file__trs_obj_queue.nodes():
i._grh__trs_node__set_after_expressions_run_()
return self._tgtFileObj.__str__()
| <filename>workspace/module/python-2.7/LxMtx/mtxObjAbs.py
# coding:utf-8
from LxBasic import bscMethods
from LxData import datObjAbs
from LxGraphic import grhObjAbs
from . import mtxCfg
class Abs_MtxBasic(mtxCfg.MtxUtility):
pass
# ******************************************************************************************************************** #
class Abs_MtxObjLoader(grhObjAbs.Abs_GrhObjLoader):
def _initAbsMtxObjLoader(self, *args):
self._initAbsGrhObjLoader(*args)
# **************************************************************************************************************** #
@classmethod
def _obj_loader_cls__set_node_raw_create_(cls, *args):
(
nodeRawDict,
typepathStr,
orig_node_raw_dict,
orig_otport_raw_list_dict,
orig_child_port_raw_list_dict
) = args
_datatypeStr = orig_node_raw_dict[cls.DEF_grh__key_node_datatype]
# property
nodeRawDict[cls.DEF_grh__key_node_typepath] = typepathStr
nodeRawDict[cls.DEF_grh__key_node_datatype] = _datatypeStr
# port
_portRawList = []
_orig_port_raw_list = orig_node_raw_dict[cls.DEF_grh__key_port]
cls._obj_loader_cls__set_ports_create_(_portRawList, _orig_port_raw_list, orig_child_port_raw_list_dict)
_orig_otport_raw_list = orig_otport_raw_list_dict.get(_datatypeStr, [])
cls._obj_loader_cls__set_ports_create_(_portRawList, _orig_otport_raw_list, orig_child_port_raw_list_dict)
nodeRawDict[cls.DEF_grh__key_port] = _portRawList
# **************************************************************************************************************** #
@classmethod
def _obj_loader_cls__set_ports_create_(cls, *args):
portRawList, orig_port_raw_list, orig_child_port_raw_list_dict = args
for orig_port_raw in orig_port_raw_list:
cls._obj_loader_cls__set_port_create_(portRawList, orig_port_raw, orig_child_port_raw_list_dict)
@classmethod
def _obj_loader_cls__set_port_create_(cls, *args):
portRawList, orig_port_raw, orig_child_port_raw_list_dict = args
_portpathStr = orig_port_raw[cls.DEF_grh__key_portpath]
if cls.DEF_grh__key_porttype in orig_port_raw:
_porttypeStr = orig_port_raw[cls.DEF_grh__key_porttype]
else:
_porttypeStr = None
_datatypeStr = orig_port_raw[cls.DEF_grh__key_port_datatype]
_portrawStr = orig_port_raw[cls.DEF_grh__key_portraw]
_assignStr = orig_port_raw[cls.DEF_grh__key_assign]
_childStrList = []
# add parent first
cls._obj_loader_cls__set_port_raw_add_(
portRawList,
portpath=_portpathStr,
porttype=_porttypeStr,
datatype=_datatypeStr,
portraw=_portrawStr,
assign=_assignStr,
children=_childStrList
)
orig_child_port_raw_list = orig_child_port_raw_list_dict.get(_datatypeStr, [])
cls._obj_loader_cls__set_port_children_create_(
portRawList, _childStrList, orig_port_raw, orig_child_port_raw_list
)
@classmethod
def _obj_loader_cls__set_port_children_create_(cls, *args):
portRawList, childStrList, orig_parent_port_raw, orig_child_port_raw_list = args
for _index, _orig_child_port_raw in enumerate(orig_child_port_raw_list):
cls._obj_loader_cls__set_port_child_create_(
portRawList, childStrList, orig_parent_port_raw, _orig_child_port_raw,
_index
)
@classmethod
def _obj_loader_cls__set_port_child_create_(cls, *args):
portRawList, childStrList, origParentPortRaw, origPortRaw, childIndex = args
_parentPortpathStr = origParentPortRaw[cls.DEF_grh__key_portpath]
_parentPorttypeStr = origParentPortRaw[cls.DEF_grh__key_port_datatype]
parentPortrawString = origParentPortRaw[cls.DEF_grh__key_portraw]
parentAssignString = origParentPortRaw[cls.DEF_grh__key_assign]
_formatString = origPortRaw[cls.DEF_grh__key_format]
_portpathStr = _formatString.format(
**{
cls.DEF_grh__key_portpath: _parentPortpathStr
}
)
_datatypeStr = origPortRaw[cls.DEF_grh__key_port_datatype]
if parentPortrawString:
_portrawStr = parentPortrawString.split(u',')[childIndex].rstrip().lstrip()
else:
_portrawStr = origPortRaw[cls.DEF_grh__key_portraw]
if parentAssignString == cls.DEF_grh__keyword__gnport:
_portAssignString = cls.DEF_grh__keyword__gnport_channel
elif parentAssignString == cls.DEF_grh__keyword__inport:
_portAssignString = cls.DEF_grh__keyword__inport_channel
elif parentAssignString == cls.DEF_grh__keyword__otport:
_portAssignString = cls.DEF_grh__keyword__otport_channel
else:
raise TypeError()
cls._obj_loader_cls__set_port_raw_add_(
portRawList,
portpath=_portpathStr,
porttype=_parentPorttypeStr,
datatype=_parentPorttypeStr,
portraw=_portrawStr,
assign=_portAssignString,
parent=_parentPortpathStr,
children=[]
)
childStrList.append(_portpathStr)
# **************************************************************************************************************** #
@classmethod
def _grh__obj_loader_cls__get_definition_node_raw_(cls, *args):
out_node_raw_dict = cls.CLS_ordered_dict()
cls._obj_loader_cls__set_node_raw_create_(
out_node_raw_dict,
*args
)
return out_node_raw_dict
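# node raw sketch: a rough shape of the dict returned above, using illustrative
# typepath/datatype values only; the concrete DEF_grh__key_* strings are defined elsewhere.
#
#   {
#       DEF_grh__key_node_typepath: u'standard_surface',      # hypothetical
#       DEF_grh__key_node_datatype: u'surfaceshader',          # hypothetical
#       DEF_grh__key_port: [
#           {portpath, porttype, datatype, portraw, assign, children=[...]},       # parent ports
#           {portpath, porttype, datatype, portraw, assign, parent, children=[]},  # channel ports
#       ]
#   }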
# ******************************************************************************************************************** #
class Abs_MtxObjQueryBuilder(grhObjAbs.Abs_GrhObjQueryrawCreator):
def _initAbsMtxObjQueryBuilder(self, *args):
self._initAbsGrhObjQueryBuilder(*args)
# **************************************************************************************************************** #
def _queryraw_loader__set_build_(self):
self._nodeRaws = bscMethods.OsJsonFile.read(
self.VAR_grh__node_file
) or {}
self._materialRaws = bscMethods.OsJsonFile.read(
self.VAR_grh__material_file
) or {}
self._geometryRaws = bscMethods.OsJsonFile.read(
self.VAR_grh__geometry_file
) or {}
self._origOtportRaw = bscMethods.OsJsonFile.read(
self.VAR_grh__output_file
) or {}
self._origPortChildRaw = bscMethods.OsJsonFile.read(
self.VAR_grh__port_child_file
) or {}
self._origNodeRaws = self.CLS_ordered_dict()
for i in [
self._nodeRaws, self._materialRaws, self._geometryRaws
]:
self._origNodeRaws.update(i)
# **************************************************************************************************************** #
def _queryraw_loader__get_node_raw_(self, *args):
typepathStr = args[0]
if typepathStr in self._origNodeRaws:
origNodeRaw = self._origNodeRaws[typepathStr]
return self.CLS_grh__obj_query_creator__obj_loader.getDefinitionNodeRaw(
typepathStr, origNodeRaw, self._origOtportRaw, self._origPortChildRaw
)
# **************************************************************************************************************** #
def _queryraw_loader__get_category_exist_(self, *args):
typepathStr = args[0]
return typepathStr in self._origNodeRaws
def _queryraw_loader__get_categories_(self):
return self._origNodeRaws.keys()
# ******************************************************************************************************************** #
class Abs_MtxObjQueue(grhObjAbs.Abs_GrhObjQueue):
def _initAbsMtxObjQueue(self, *args):
self._initAbsGrhObjQueue(*args)
# raw **************************************************************************************************************** #
class Abs_MtxRaw(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatRaw
):
def _initAbsMtxRaw(self, *args):
self._initAbsDatRaw(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxDatatype(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatDatatype
):
def _initAbsMtxDatatype(self, *args):
self._initAbsDatDatatype(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxObjProxyNamespace(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjNamespace
):
def _initAbsMtxObjProxyNamespace(self, *args):
self._initAbsDatObjNamespace(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxName(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatName
):
def _initAbsMtxName(self, *args):
self._initAbsDatName(*args)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxObjTypename(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjName,
):
def _initAbsMtxObjTypename(self, *args):
self._initAbsDatObjName(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxObjName(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjName,
):
def _initAbsMtxObjName(self, *args):
self._initAbsDatObjName(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxPath(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjPath
):
def _initAbsMtxPath(self, *args):
self._initAbsDatObjPath(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxAttrpath(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatObjComppath
):
def _initAbsMtxAttrpath(self, *args):
self._initAbsDatObjComppath(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
[('raw', self.raw())]
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# object set ********************************************************************************************************* #
class Abs_MtxObjSet(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhObjStack
):
def _initAbsMtxObjSet(self, *args):
self._initAbsGrhObjStack(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# value ************************************************************************************************************** #
class Abs_MtxValue(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
datObjAbs.Abs_DatValue
):
def _initAbsMtxValue(self, *args):
self._initAbsDatValue(*args)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
self.datatype(), self.data()
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.toString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# ******************************************************************************************************************** #
class Abs_MtxPort(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhPort
):
def _initAbsMtxPort(self, *args, **kwargs):
self._initAbsGrhPort(*args, **kwargs)
self._initAbsDatXmlObj()
self._proxyObj = None
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_attach_value_str_(self):
return self.portpathString()
def _xml_obj__get_attribute_attach_list_(self):
if self.isChannel() is True:
# <... nodename="nodepath" member="parent portpath" channel="portname" />
return [
self.parent(),
(self._xml_obj__get_attribute_attach_key_str_(), self.portnameString())
]
else:
# <... nodename = "nodepath" member = "portpath" />
return [
self.node(),
(self._xml_obj__get_attribute_attach_key_str_(), self.portpathString())
]
def _xml_obj__get_attribute_list_(self):
return [
self.portpath(),
self.datatype(),
self.portgiven()
]
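# binding sketch: how the two branches above end up in the emitted XML attributes,
# with illustrative node/port names only.
#
#   full port : nodename="image1" member="base_color"
#   channel   : nodename="image1" member="base_color" channel="r"
#
# i.e. a channel attaches its parent port plus its own portname, while a plain
# port attaches its node plus its full portpath.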
class Abs_MtxNode(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhNode
):
def _initAbsMtxNode(self, *args, **kwargs):
self._initAbsGrhNode(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_element_prefix_str(self):
return self.typepathString()
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.datatype()
]
def _xml_obj__get_child_element_list_(self):
return self.changedInport()
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxConnector(
grhObjAbs.Abs_GrhConnector
):
def _initAbsMtxConnector(self, *args):
self._initAbsGrhConnector(*args)
# port proxy ********************************************************************************************************* #
class Abs_MtxPortProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhPortProxy,
):
def _initAbsMtxPortProxy(self, *args, **kwargs):
self._initAbsGrhPortProxy(*args, **kwargs)
self._initAbsDatXmlObj()
def _xml_obj__get_attribute_list_(self):
return [
self.bindObject().portpath(),
self.bindObject().datatype(),
self.bindPortgiven()
]
class Abs_MtxShaderProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhShaderProxy
):
def _initAbsMtxShaderProxy(self, *args, **kwargs):
self._initAbsGrhShaderProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.bindObject().typepath(),
[(u'context', self._shader_proxy__get_material_context_())]
]
def _xml_obj__get_child_element_list_(self):
return self.changedBindInportProxies()
class Abs_MtxMaterialProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhMaterialProxy
):
def _initAbsMtxMaterialProxy(self, *args, **kwargs):
self._initAbsGrhMaterialProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self.path()
]
def _xml_obj__get_child_element_list_(self):
# update shader's node graph first
for shaderProxyObj in self.shaders():
nodeGraphObj = shaderProxyObj.inputNodeGraph()
nodeGraphObj._node_graph__set_bind_obj_update_()
return self.shaders()
def _xml_obj__get_sibling_element_list_(self):
lis = []
# node graph
for shaderProxyObj in self.shaders():
nodeGraphObjs = shaderProxyObj.inputNodeGraphs()
if nodeGraphObjs:
for nodeGraphObj in nodeGraphObjs:
if nodeGraphObj.hasBindNodes():
if not nodeGraphObj in lis:
lis.append(nodeGraphObj)
return lis
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxGeometryProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhGeometryProxy
):
def _initAbsMtxGeometryProxy(self, *args, **kwargs):
self._initAbsGrhGeometryProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.bindObject().typepath()
]
def _xml_obj__get_child_element_list_(self):
return self.changedProperties() + self.changedVisibilities()
# node graph ********************************************************************************************************* #
class Abs_MtxNodeGraph(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhNodeGraph
):
def _initAbsMtxNodeGraph(self, *args, **kwargs):
self._initAbsGrhNodeGraph(*args, **kwargs)
# **************************************************************************************************************** #
def _xml_obj__get_attribute_list_(self):
return [
self.path()
]
def _xml_obj__get_child_element_list_(self):
return self.bindNodes() + self.bindOtportProxies()
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
class Abs_MtxNodeGraphOtportProxy(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj,
grhObjAbs.Abs_GrhNodeGraphPortProxy,
):
def _initAbsMtxNodeGraphOtportProxy(self, *args, **kwargs):
self._initAbsGrhNodeGraphPortProxy(*args, **kwargs)
self._initAbsDatXmlObj()
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self.path(),
self.bindObject().datatype(),
self.bindObject()
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.pathString()
def _xml_obj__get_attribute_attach_list_(self):
return [
self.bindNodeGraph(),
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
# portset ************************************************************************************************************ #
class Abs_MtxPortset(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__name = None
CLS_grh__node__port_stack = None
def _initAbsMtxPortset(self, *args):
self._nameObj = self.CLS_mtx__name(*args)
self._portStackObj = self.CLS_grh__node__port_stack()
self._initAbsDatXmlObj()
def restore(self):
self._portStackObj.restore()
def name(self):
return self._nameObj
def nameString(self):
"""
:return: str
"""
return self._nameObj.raw()
def setNameString(self, nameString):
"""
:param nameString: str
:return: None
"""
self._nameObj.setRaw(nameString)
def addPort(self, portObject):
self._portStackObj.addObject(portObject)
def addPorts(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addPort(i) for i in _]
def ports(self):
return self._portStackObj.objects()
def hasPorts(self):
return self._portStackObj.hasObjects()
def _xml_obj__get_attribute_attach_value_str_(self):
return self.name()._xml_obj__get_attribute_attach_value_str_()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
def _xml_obj__get_attribute_list_(self):
return [
self.name()
]
def _xml_obj__get_child_element_list_(self):
return self.ports()
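# usage sketch, assuming a concrete Portset subclass (names are illustrative):
#
#   propertysetObj = MtxPortset(u'asset_propertyset')
#   propertysetObj.addPorts(subdivisionPort, displacementPort)
#   propertysetObj.hasPorts()    # True once any port has been added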
# geometry collection
class Abs_MtxCollection(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__name = None
CLS_mtx__look__geometry_proxy_stack = None
CLS_mtx__collection_set = None
DEF_geometry_separator = None
def _initAbsMtxCollection(self, *args):
self._nameObj = self.CLS_mtx__name(*args)
self._geometryProxyStackObj = self.CLS_mtx__look__geometry_proxy_stack()
self._collectionStackObj = self.CLS_mtx__collection_set()
self._excludeGeometryStackObj = self.CLS_mtx__look__geometry_proxy_stack()
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def nameString(self):
"""
:return: str
"""
return self._nameObj.toString()
def setNameString(self, nameString):
"""
:param nameString: str
:return: None
"""
self._nameObj.setRaw(nameString)
def addGeometry(self, geometryProxyObj):
"""
:param geometryProxyObj: object of Geometry
:return:
"""
self._geometryProxyStackObj.addObject(geometryProxyObj)
def addGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addGeometry(i) for i in list(_)]
def geometries(self):
"""
:return: list(object of Geometry, ...)
"""
return self._geometryProxyStackObj.objects()
def hasGeometries(self):
"""
:return: bool
"""
return self._geometryProxyStackObj.hasObjects()
def geometryNameStrings(self):
"""
:return: list(str, ...)
"""
return [i.nameString() for i in self.geometries()]
def geometryPathStrings(self):
"""
:return: list(str, ...)
"""
return [i.bindPathString() for i in self.geometries()]
def excludeGeometrySet(self):
return self._excludeGeometryStackObj
def addExcludeGeometry(self, geometryProxyObj):
self._excludeGeometryStackObj.addObject(geometryProxyObj)
def addExcludeGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addExcludeGeometry(i) for i in list(_)]
def excludeGeometries(self):
return self._excludeGeometryStackObj.objects()
def collectionSet(self):
return self._collectionStackObj
def addCollection(self, collectionObject):
"""
:param collectionObject: object of Collection
:return: None
"""
self._collectionStackObj.addObject(collectionObject)
def hasCollections(self):
"""
:return: bool
"""
return self._collectionStackObj.hasObjects()
def collections(self):
"""
:return: list(object of Collection, ...)
"""
return self._collectionStackObj.objects()
def collectionNames(self):
"""
:return: list(str, ...)
"""
return [i.nameString() for i in self.collections()]
def toString(self):
return self.nameString()
def _xml_obj__get_attribute_list_(self):
return [
self._nameObj,
self._geometryProxyStackObj,
self.collectionSet(),
self.excludeGeometrySet()
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.nameString()
def _xml_obj__get_attribute_attach_list_(self):
return [
(self._xml_obj__get_attribute_attach_key_str_(), self._xml_obj__get_attribute_attach_value_str_())
]
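# usage sketch, assuming a concrete Collection subclass (names are illustrative):
#
#   bodyCollectionObj = MtxCollection(u'body')
#   bodyCollectionObj.addGeometries(headGeometryProxy, torsoGeometryProxy)
#   bodyCollectionObj.addExcludeGeometry(eyesGeometryProxy)
#   materialAssignObj.setCollection(bodyCollectionObj)    # see Abs_MtxAssign below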
# assign ************************************************************************************************************* #
class Abs_MtxAssign(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__name = None
CLS_mtx__look__geometry_proxy_stack = None
DEF_geometry_separator = None
def _initAbsMtxAssign(self, *args):
lookArg, nameArg = args
self._lookObj = lookArg
self._nameObj = self.CLS_mtx__name(nameArg)
self._geometryProxyStackObj = self.CLS_mtx__look__geometry_proxy_stack(
self.nameString()
)
self._collectionObj = None
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def name(self):
return self._nameObj
def nameString(self):
"""
:return: str
"""
return self._nameObj.raw()
def setNameString(self, nameString):
"""
:param nameString: str
:return: None
"""
self._nameObj._raw__set_create_by_str_(nameString)
# **************************************************************************************************************** #
def look(self):
return self._lookObj
# **************************************************************************************************************** #
def _assign__set_geometry_proxy_add_(self, *args):
geometryProxyObj = args[0]
self._geometryProxyStackObj.addObject(geometryProxyObj)
def hasGeometry(self, *args):
return self._geometryProxyStackObj._obj_stack__get_obj_exist_(*args)
def addGeometry(self, geometryProxyObj):
"""
:param geometryProxyObj: object of Geometry
:return: None
"""
self._assign__set_geometry_proxy_add_(geometryProxyObj)
def addGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addGeometry(i) for i in list(_)]
def geometries(self):
"""
:return: list(object of Geometry, ...)
"""
return self._geometryProxyStackObj.objects()
def hasGeometries(self):
"""
:return: bool
"""
return self._geometryProxyStackObj.hasObjects()
def geometryNameStrings(self):
"""
:return: list(str, ...)
"""
return [i.nameString() for i in self.geometries()]
def geometryPathStrings(self):
"""
:return: list(str, ...)
"""
return [i.bindPathString() for i in self.geometries()]
# **************************************************************************************************************** #
def setCollection(self, collectionObject):
"""
:param collectionObject: object of Collection
:return: None
"""
self._collectionObj = collectionObject
def collection(self):
"""
:return: object of Collection
"""
return self._collectionObj
def _xmlElementAttaches_(self):
pass
class Abs_MtxMaterialAssign(Abs_MtxAssign):
def _initAbsMtxMaterialAssign(self, *args):
self._initAbsMtxAssign(*args)
self._materialProxyObj = None
def setMaterial(self, tgtMaterialObj):
"""
:param tgtMaterialObj: object of MaterialProxy
:return:
"""
self._materialProxyObj = tgtMaterialObj
def material(self):
"""
:return: object of ShaderSet
"""
return self._materialProxyObj
def _xmlElementAttaches_(self):
return [
self._materialProxyObj,
self._collectionObj
]
def _xml_obj__get_attribute_attach_value_str_(self):
return self.nameString()
def _xml_obj__get_attribute_list_(self):
return [
self.name(),
self.material(),
self._geometryProxyStackObj,
self.collection()
]
class Abs_MtxPropertyAssign(Abs_MtxAssign):
def _initAbsMtxPropertyAssign(self, *args):
pass
class Abs_MtxPropertysetAssign(Abs_MtxAssign):
CLS_mtx__propertyset = None
def _initAbsMtxPropertysetAssign(self, *args):
self._initAbsMtxAssign(*args)
self._propertysetObj = None
def _setPropertyset_(self, *args):
if isinstance(args[0], (str, unicode)):
propertysetObject = self.CLS_mtx__propertyset(args[0])
else:
propertysetObject = args[0]
self._propertysetObj = propertysetObject
return self._propertysetObj
def setPropertyset(self, *args):
"""
:param args:
1.str
2.instance of "Propertyset"
:return: instance of "Propertyset"
"""
return self._setPropertyset_(*args)
def hasPropertyset(self):
return self._propertysetObj is not None
def propertyset(self):
"""
:return: object of Propertyset
"""
return self._propertysetObj
def _xmlElementAttaches_(self):
return [
self._propertysetObj,
self._collectionObj
]
def _xml_obj__get_attribute_list_(self):
return [
self.name(),
self.propertyset(),
self._geometryProxyStackObj,
self.collection()
]
class Abs_MtxVisibilityAssign(Abs_MtxAssign):
CLS_grh__type = None
CLS_mtx__value_visibility = None
CLS_mtx__geometry_viewer_set = None
def _initAbsMtxVisibilityAssign(self, *args):
self._initAbsMtxAssign(*args)
self._vistypeObj = None
self._visibilityValueObj = None
self._viewerGeometryStackObj = self.CLS_mtx__geometry_viewer_set()
def type(self):
return self._vistypeObj
def typeString(self):
return self._vistypeObj.toString()
def visible(self):
return self._visibilityValueObj
def assignVisibility(self, portObj):
visibilityString = portObj.portpathString()
self._vistypeObj = self.CLS_grh__type(visibilityString)
self._visibilityValueObj = portObj.value()
def addViewerGeometry(self, geometryProxyObj):
self._viewerGeometryStackObj.addObject(geometryProxyObj)
def viewerGeometries(self):
return self._viewerGeometryStackObj.objsets()
# xml ************************************************************************************************************ #
def _xmlElementAttaches_(self):
return [
self._collectionObj
]
def _xml_obj__get_attribute_list_(self):
return [
self.name(),
self.type(),
self.visible(),
self._geometryProxyStackObj,
self._viewerGeometryStackObj,
self.collection()
]
# ******************************************************************************************************************** #
class Abs_MtxLook(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__look__name = None
CLS_mtx__look__namespace = None
CLS_mtx__look__assign_stack = None
CLS_mtx__look__material_assign = None
CLS_mtx__look__material_assign_stack = None
CLS_mtx__look__propertyset_assign = None
CLS_mtx__look__propertyset_assign_stack = None
CLS_mtx__look__visibility_assign = None
CLS_mtx__look__visibility_assign_stack = None
CLS_mtx__look__geometry_proxy_stack = None
def _initAbsMtxLook(self, *args):
fileArg, nameArg = args
self._fileObj = fileArg
self._nameObj = self.CLS_mtx__look__name(nameArg)
self._visibilityAssignStackObj = self.CLS_mtx__look__visibility_assign_stack(nameArg)
self._materialAssignStackObj = self.CLS_mtx__look__material_assign_stack(nameArg)
self._propertysetAssignStackObj = self.CLS_mtx__look__propertyset_assign_stack(nameArg)
self._geometryProxyStackObj = self.CLS_mtx__look__geometry_proxy_stack(nameArg)
self._initAbsDatXmlObj()
# **************************************************************************************************************** #
def _look__set_assigns_create_(self):
for i in self._geometryProxyStackObj.objects():
self._look__set_material_assigns_create_(i)
self._look__set_propertyset_assigns_create_(i)
self._look__set_visibility_assigns_create_(i)
def _look__set_material_assigns_create_(self, geometryProxyObj):
def addFnc_(geometryProxyObj_, materialProxyObj_):
_materialNodeObj = materialProxyObj_.bindObject()
_count = self._materialAssignStackObj.objectsCount()
_keyString = _materialNodeObj.pathString()
if self._materialAssignStackObj._obj_stack__get_obj_exist_(_keyString):
_materialAssignObj = self._materialAssignStackObj._obj_stack__get_obj_(_keyString)
else:
_materialAssignObj = self.CLS_mtx__look__material_assign(
self, u'material_assign_{}'.format(_count)
)
_materialAssignObj.setMaterial(materialProxyObj_)
self._materialAssignStackObj._obj_stack__set_obj_add_(_keyString, _materialAssignObj)
if _materialAssignObj.hasGeometry(geometryProxyObj_) is False:
_materialAssignObj.addGeometry(geometryProxyObj_)
#
# namespaceStr = self.nameString()
# materialProxyObj = geometryProxyObj.inputNodeProxy(namespaceStr)
# if materialProxyObj is not None:
# addFnc_(geometryProxyObj, materialProxyObj)
materialProxyObjList = geometryProxyObj.assignmentMaterialProxies()
for materialProxyObj in materialProxyObjList:
addFnc_(geometryProxyObj, materialProxyObj)
def _look__set_propertyset_assigns_create_(self, geometryProxyObj):
def addFnc_(geometryProxyObj_, propertysetObj_):
_count = self._propertysetAssignStackObj.objectsCount()
_keyString = geometryProxyObj_.bindPathString()
if self._propertysetAssignStackObj._obj_stack__get_obj_exist_(_keyString):
_propertysetAssignObj = self._propertysetAssignStackObj._obj_stack__get_obj_(_keyString)
else:
_propertysetAssignObj = self.CLS_mtx__look__propertyset_assign(
self, propertysetObj_.nameString()
)
# _materialAssignObj = self.CLS_mtx__look__material_assign(
# self, u'material_assign_{}'.format(_count)
# )
self._propertysetAssignStackObj._obj_stack__set_obj_add_(_keyString, _propertysetAssignObj)
_propertysetAssignObj.setPropertyset(propertysetObj_)
if _propertysetAssignObj.hasGeometry(geometryProxyObj_) is False:
_propertysetAssignObj.addGeometry(geometryProxyObj_)
bindPortsetNamespaceStr = geometryProxyObj.bindPortsetNamespaceString()
propertysetObj = geometryProxyObj._geometry_proxy__set_propertyset_update_(bindPortsetNamespaceStr)
if propertysetObj.hasPorts():
addFnc_(geometryProxyObj, propertysetObj)
def _look__set_visibility_assigns_create_(self, geometryProxyObj):
def addFnc_(geometryProxyObj_, portProxyObj_):
_portObject = portProxyObj_.bindObject()
_count = self._visibilityAssignStackObj.objectsCount()
_keyString = _portObject.portpathString()
if self._visibilityAssignStackObj._obj_stack__get_obj_exist_(_keyString):
_visibilityObject = self._visibilityAssignStackObj._obj_stack__get_obj_(_keyString)
else:
_visibilityObject = self.CLS_mtx__look__visibility_assign(
self, u'visibility_assign_{}'.format(_count)
)
_visibilityObject.assignVisibility(_portObject)
self._visibilityAssignStackObj._obj_stack__set_obj_add_(_keyString, _visibilityObject)
if _visibilityObject.hasGeometry(geometryProxyObj_) is False:
_visibilityObject.addGeometry(geometryProxyObj_)
geometryVisibilities = geometryProxyObj.changedVisibilities()
if geometryVisibilities:
[addFnc_(geometryProxyObj, i) for i in geometryVisibilities]
# **************************************************************************************************************** #
def _look__get_geometry_namespace_str_(self):
return self.nameString()
def geometryNamespaceString(self):
return self._look__get_geometry_namespace_str_()
# **************************************************************************************************************** #
def file(self):
return self._fileObj
# **************************************************************************************************************** #
def name(self):
return self._nameObj
def nameString(self):
return self._nameObj.toString()
# **************************************************************************************************************** #
def geometries(self):
return self._geometryProxyStackObj.objects()
def hasGeometries(self):
return self._geometryProxyStackObj.hasObjects()
def _look__set_geometry_proxy_add_(self, *args):
geometryProxyObj = args[0]
if geometryProxyObj.namespace().isRoot() is True:
geometryNamespaceStr = self.geometryNamespaceString()
geometryProxyObj.setNamespaceString(geometryNamespaceStr)
# add Variant
# geometryObj = geometryProxyObj.bindObject()
# geometryObj.addVariantObject(self.nameString())
# add geometry
self._geometryProxyStackObj.addObject(geometryProxyObj)
def addGeometry(self, geometryProxyObj):
self._look__set_geometry_proxy_add_(geometryProxyObj)
def addGeometries(self, *args):
if isinstance(args[0], (tuple, list)):
[self.addGeometry(i) for i in list(args[0])]
else:
[self.addGeometry(i) for i in list(args)]
def geometry(self, geometryString):
return self._geometryProxyStackObj.object(geometryString)
def hasGeometry(self, *args):
return self._geometryProxyStackObj._obj_stack__get_obj_exist_(*args)
# **************************************************************************************************************** #
def materialAssigns(self):
return self._materialAssignStackObj.objects()
def propertysetAssigns(self):
return self._propertysetAssignStackObj.objects()
def visibilityAssigns(self):
return self._visibilityAssignStackObj.objects()
# **************************************************************************************************************** #
def hasAssigns(self):
return self.assigns() != []
def assigns(self):
return self.materialAssigns() + self.propertysetAssigns() + self.visibilityAssigns()
def _xmlElementAttaches_(self):
lis = []
for assignObject in self.assigns():
for xmlObject in assignObject._xmlElementAttaches_():
if xmlObject is not None:
if xmlObject not in lis:
lis.append(xmlObject)
return lis
def _xml_obj__get_attribute_list_(self):
return [
self._nameObj
]
def _xml_obj__get_child_element_list_(self):
self._look__set_assigns_create_()
return self.assigns()
def _xml_obj__get_sibling_element_list_(self):
return self._xmlElementAttaches_()
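# usage sketch, assuming a concrete Look subclass with the CLS_mtx__look__* slots filled in:
#
#   lookObj = fileObj.addLook(u'lookA')                  # see Abs_MtxFile below
#   lookObj.addGeometries(geometryProxyA, geometryProxyB)
#   xmlText = lookObj.__str__()                          # material/propertyset/visibility
#                                                        # assigns are built lazily here
#
# addGeometry() re-namespaces root-level geometry proxies with the look name, so the
# same geometry can be shared by several looks without clashing.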
class Abs_MtxFile(
Abs_MtxBasic,
datObjAbs.Abs_DatXmlObj
):
CLS_mtx__file__path = None
CLS_mtx__file__version = None
CLS_mtx__file__reference_stack = None
CLS_mtx__file__reference = None
CLS_mtx__file__look_stack = None
CLS_mtx__file__look = None
VAR_mtx__file__version = None
def __init__(self, *args, **kwargs):
pass
def _initAbsMtxFile(self, *args):
self._filepathObj = self.CLS_mtx__file__path(*args)
self._versionObj = self.CLS_mtx__file__version(self.VAR_mtx__file__version)
self._referenceStackObj = self.CLS_mtx__file__reference_stack()
self._lookStackObj = self.CLS_mtx__file__look_stack(self)
self._initAbsDatXmlObj()
def _file__set_look_add_(self, *args):
if args:
_ = args[0]
if isinstance(_, (str, unicode)):
lookStr = _
lookObject = self.CLS_mtx__file__look(self, lookStr)
elif isinstance(_, self.CLS_mtx__file__look):
lookObject = _
else:
raise TypeError
else:
lookObject = self.CLS_mtx__file__look(self, u'default_look')
self._lookStackObj.addObject(lookObject)
return lookObject
def _file__set_reference_add_(self, *args):
if self.CLS_mtx__file__reference is not None:
referenceCls = self.CLS_mtx__file__reference
else:
referenceCls = self.__class__
if isinstance(args[0], (str, unicode)):
fileObj = referenceCls(args[0])
elif isinstance(args[0], referenceCls):
fileObj = args[0]
else:
fileObj = referenceCls(u'default')
keyString = fileObj.fullpathFilename()
self._referenceStackObj._obj_stack__set_obj_add_(keyString, fileObj)
def filepath(self):
return self._filepathObj
def fullpathFilename(self):
return self._filepathObj.toString()
def version(self):
return self._versionObj
def versionString(self):
return self._versionObj.toString()
def addReference(self, fileObject):
self._file__set_reference_add_(fileObject)
def references(self):
return self._referenceStackObj.objects()
def reference(self, fileString):
return self._referenceStackObj.object(fileString)
def hasLook(self, lookStr):
return self._lookStackObj._obj_stack__get_obj_exist_(lookStr)
def addLook(self, *args):
"""
:param args:
1.str
2.instance of "Look"
:return:
"""
return self._file__set_look_add_(*args)
def looks(self):
return self._lookStackObj.objects()
def look(self, lookStr):
return self._lookStackObj.object(lookStr)
def lookIndex(self, *args):
return self._lookStackObj._obj_stack__get_obj_index_(*args)
def save(self):
xmlDoc = self.__str__()
bscMethods.OsFile.write(
self.fullpathFilename(), xmlDoc
)
def _xml_obj__get_attribute_list_(self):
return [
self.version()
]
def _xml_obj__get_child_element_list_(self):
return self.references() + self.looks()
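# usage sketch, assuming a concrete File subclass (class and path names are illustrative):
#
#   fileObj = MtxFile(u'/tmp/example.mtlx')
#   fileObj.addReference(u'materialx/arnold/nodedefs.mtlx')   # keyed by fullpathFilename()
#   lookObj = fileObj.addLook(u'default_look')                # accepts a str or a Look instance
#   fileObj.save()                                            # writes self.__str__() via OsFile.write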
class Abs_MtxReference(Abs_MtxFile):
def _initAbsMtxReference(self, *args):
self._initAbsMtxFile(*args)
# xml ************************************************************************************************************ #
def _xml_obj__get_attribute_list_(self):
return [
self._filepathObj
]
# ******************************************************************************************************************** #
class Abs_MtxTrsLook(Abs_MtxBasic):
CLS_mtx__trs_look__tgt_look = None
CLS_mtx__trs_look__trs_geometry_proxy = None
def _initAbsMtxTrsLook(self, *args):
trsFileArg, tgtLookArg = args
self._trsFileObj = trsFileArg
tgtFileObj = trsFileArg.tgtFile()
self._tgtLookObj = self.CLS_mtx__trs_look__tgt_look(tgtFileObj, tgtLookArg)
def trsFile(self):
return self._trsFileObj
def tgtLook(self):
return self._tgtLookObj
def addSrcGeometry(self, srcNodepathStr):
# geometry namespace = look name
namespaceStr = self.tgtLook().nameString()
trsGeometryProxyObj = self.CLS_mtx__trs_look__trs_geometry_proxy(
srcNodepathStr,
namespace=namespaceStr
)
# target
tgtGeometryProxyObj = trsGeometryProxyObj.tgtNodeProxy()
if self.tgtLook().hasGeometry(tgtGeometryProxyObj) is False:
self.tgtLook().addGeometry(tgtGeometryProxyObj)
else:
bscMethods.PyMessage.traceWarning(
u'''Geometry "{}" already exists.'''.format(tgtGeometryProxyObj.pathString())
)
def addSrcGeometries(self, *args):
if isinstance(args[0], (list, tuple)):
_ = args[0]
else:
_ = args
[self.addSrcGeometry(i) for i in _]
def _mtx__trs_look__set_material_assign_add_(self, *args):
pass
def addAssign(self, *args):
self._mtx__trs_look__set_material_assign_add_(*args)
def __str__(self):
return self._tgtLookObj.__str__()
# ******************************************************************************************************************** #
class Abs_MtxTrsFile(Abs_MtxBasic):
CLS_mtx__trs_file__tgt_file = None
CLS_mtx__trs_file__trs_look = None
IST_mtx__trs_file__trs_obj_queue = None
def _initAbsMtxTrsFile(self, *args):
fileString = args[0]
self._tgtFileObj = self.CLS_mtx__trs_file__tgt_file(fileString)
self._tgtFileObj.addReference(
u'materialx/arnold/nodedefs.mtlx'
)
def tgtFile(self):
return self._tgtFileObj
def addLook(self, lookStr):
trsLookObj = self.CLS_mtx__trs_file__trs_look(self, lookStr)
if self._tgtFileObj.hasLook(lookStr) is False:
tgtLookObk = trsLookObj.tgtLook()
self._tgtFileObj.addLook(tgtLookObk)
else:
bscMethods.PyMessage.traceWarning(
u'''Look "{}" already exists.'''.format(lookStr)
)
return trsLookObj
def tgtLook(self, lookStr):
return self._tgtFileObj.look(lookStr)
def tgtLooks(self):
return self._tgtFileObj.looks()
def save(self):
for i in self.IST_mtx__trs_file__trs_obj_queue.nodes():
i._grh__trs_node__set_after_expressions_run_()
self._tgtFileObj.save()
bscMethods.PyMessage.traceResult(
u'save file "{}"'.format(
self._tgtFileObj.fullpathFilename()
)
)
def __str__(self):
for i in self.IST_mtx__trs_file__trs_obj_queue.nodes():
i._grh__trs_node__set_after_expressions_run_()
return self._tgtFileObj.__str__()
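# usage sketch of the translate workflow, assuming concrete Trs* subclasses and an
# illustrative source node path:
#
#   trsFileObj = MtxTrsFile(u'/tmp/asset.mtlx')     # target file references the arnold nodedefs
#   trsLookObj = trsFileObj.addLook(u'lookA')       # warns instead of duplicating an existing look
#   trsLookObj.addSrcGeometries(u'|asset|body', u'|asset|head')
#   trsFileObj.save()                               # runs deferred expressions, then writes the file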
| el | 0.255187 | # coding:utf-8 # ******************************************************************************************************************** # # **************************************************************************************************************** # # property # port # **************************************************************************************************************** # # add parent first # **************************************************************************************************************** # # ******************************************************************************************************************** # # **************************************************************************************************************** # # **************************************************************************************************************** # # **************************************************************************************************************** # # ******************************************************************************************************************** # # raw **************************************************************************************************************** # # xml ************************************************************************************************************ # # xml ************************************************************************************************************ # # xml ************************************************************************************************************ # # xml ************************************************************************************************************ # # **************************************************************************************************************** # # **************************************************************************************************************** # # **************************************************************************************************************** # # **************************************************************************************************************** # # object set ********************************************************************************************************* # # **************************************************************************************************************** # # value ************************************************************************************************************** # # **************************************************************************************************************** # # ******************************************************************************************************************** # # xml ************************************************************************************************************ # # <... nodename="nodepath" member="parent portpath" channel="portname" /> # <... 
nodename = "nodepath" member = "portpath" /> # xml ************************************************************************************************************ # # port proxy ********************************************************************************************************* # # **************************************************************************************************************** # # xml ************************************************************************************************************ # # update shader's node graph first # node graph # xml ************************************************************************************************************ # # node graph ********************************************************************************************************* # # **************************************************************************************************************** # # xml ************************************************************************************************************ # # portset ************************************************************************************************************ # :return: str :param nameString: str :return: None # geometry collection # **************************************************************************************************************** # :return: str :param nameString: str :return: None :param geometryProxyObj: object of Geometry :return: :return: list(object or geometry, ...) :return: bool :return: list(str, ...) :return: list(str, ...) :param collectionObject: object of Collection :return: None :return: bool :return: list(object of Collection, ...) :return: list(str, ...) # assign ************************************************************************************************************* # # **************************************************************************************************************** # :return: str :param nameString: str :return: None # **************************************************************************************************************** # # **************************************************************************************************************** # :param geometryProxyObj: object of Geometry :return: None :return: list(object or geometry, ...) :return: bool :return: list(str, ...) :return: list(str, ...) 
# **************************************************************************************************************** # :param collectionObject: object of Collection :return: None :return: object of Collection :param tgtMaterialObj: object of MaterialProxy :return: :return: object of ShaderSet :param args: 1.str 2.instance of "Propertyset" :return: instance of "Propertyset" :return: object of Propertyset # xml ************************************************************************************************************ # # ******************************************************************************************************************** # # **************************************************************************************************************** # # # namespaceStr = self.nameString() # materialProxyObj = geometryProxyObj.inputNodeProxy(namespaceStr) # if materialProxyObj is not None: # addFnc_(geometryProxyObj, materialProxyObj) # _materialAssignObj = self.CLS_mtx__look__material_assign( # self, u'material_assign_{}'.format(_count) # ) # **************************************************************************************************************** # # **************************************************************************************************************** # # **************************************************************************************************************** # # **************************************************************************************************************** # # add Variant # geometryObj = geometryProxyObj.bindObject() # geometryObj.addVariantObject(self.nameString()) # add geometry # **************************************************************************************************************** # # **************************************************************************************************************** # :param args: 1.str 2.instance of "Look" :return: # xml ************************************************************************************************************ # # ******************************************************************************************************************** # # geometry namespace = look name # target Geometry "{}" is Exist. # ******************************************************************************************************************** # Look "{}" is Exist. | 2.240978 | 2 |
lib/pyreadline/lineeditor/lineobj.py | dorcia592/mcplayeredit | 46 | 6632956 | <gh_stars>10-100
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2006 <NAME>. <<EMAIL>>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import re, operator, sys
import wordmatcher
import pyreadline.clipboard as clipboard
from pyreadline.logger import log
from pyreadline.unicode_helper import ensure_unicode
kill_ring_to_clipboard = False  # set to True to copy every addition to the kill ring to the clipboard
class NotAWordError(IndexError):
pass
def quote_char(c):
if ord(c) > 0:
return c
############## Line positioner ########################
class LinePositioner(object):
def __call__(self, line):
raise NotImplementedError(u"Base class !!!")
class NextChar(LinePositioner):
def __call__(self, line):
if line.point < len(line.line_buffer):
return line.point + 1
else:
return line.point
NextChar = NextChar()
class PrevChar(LinePositioner):
def __call__(self, line):
if line.point > 0:
return line.point - 1
else:
return line.point
PrevChar = PrevChar()
class NextWordStart(LinePositioner):
def __call__(self, line):
return line.next_start_segment(line.line_buffer, line.is_word_token)[line.point]
NextWordStart = NextWordStart()
class NextWordEnd(LinePositioner):
def __call__(self, line):
return line.next_end_segment(line.line_buffer, line.is_word_token)[line.point]
NextWordEnd = NextWordEnd()
class PrevWordStart(LinePositioner):
def __call__(self, line):
return line.prev_start_segment(line.line_buffer, line.is_word_token)[line.point]
PrevWordStart = PrevWordStart()
class WordStart(LinePositioner):
def __call__(self, line):
if line.is_word_token(line.get_line_text()[Point(line):Point(line) + 1]):
if Point(line) > 0 and line.is_word_token(line.get_line_text()[Point(line) - 1:Point(line)]):
return PrevWordStart(line)
else:
return line.point
else:
raise NotAWordError(u"Point is not in a word")
WordStart = WordStart()
class WordEnd(LinePositioner):
def __call__(self, line):
if line.is_word_token(line.get_line_text()[Point(line):Point(line) + 1]):
if line.is_word_token(line.get_line_text()[Point(line) + 1:Point(line) + 2]):
return NextWordEnd(line)
else:
return line.point
else:
raise NotAWordError(u"Point is not in a word")
WordEnd = WordEnd()
class PrevWordEnd(LinePositioner):
def __call__(self, line):
return line.prev_end_segment(line.line_buffer, line.is_word_token)[line.point]
PrevWordEnd = PrevWordEnd()
class PrevSpace(LinePositioner):
def __call__(self, line):
point = line.point
if line[point - 1:point].get_line_text() == u" ":
while point > 0 and line[point - 1:point].get_line_text() == u" ":
point -= 1
while point > 0 and line[point - 1:point].get_line_text() != u" ":
point -= 1
return point
PrevSpace = PrevSpace()
class StartOfLine(LinePositioner):
def __call__(self, line):
return 0
StartOfLine = StartOfLine()
class EndOfLine(LinePositioner):
def __call__(self, line):
return len(line.line_buffer)
EndOfLine = EndOfLine()
class Point(LinePositioner):
def __call__(self, line):
return line.point
Point = Point()
class Mark(LinePositioner):
def __call__(self, line):
return line.mark
k = Mark()
all_positioners = [(value.__class__.__name__, value)
for key, value in globals().items()
if isinstance(value, LinePositioner)]
all_positioners.sort()
############### LineSlice #################
class LineSlice(object):
def __call__(self, line):
        raise NotImplementedError(u"Base class !!!")
class CurrentWord(LineSlice):
def __call__(self, line):
return slice(WordStart(line), WordEnd(line), None)
CurrentWord = CurrentWord()
class NextWord(LineSlice):
def __call__(self, line):
work = TextLine(line)
work.point = NextWordStart
start = work.point
stop = NextWordEnd(work)
return slice(start, stop)
NextWord = NextWord()
class PrevWord(LineSlice):
def __call__(self, line):
work = TextLine(line)
work.point = PrevWordEnd
stop = work.point
start = PrevWordStart(work)
return slice(start, stop)
PrevWord = PrevWord()
class PointSlice(LineSlice):
def __call__(self, line):
return slice(Point(line), Point(line) + 1, None)
PointSlice = PointSlice()
############### TextLine ######################
class TextLine(object):
def __init__(self, txtstr, point = None, mark = None):
self.line_buffer = []
self._point = 0
self.mark = -1
self.undo_stack = []
self.overwrite = False
if isinstance(txtstr, TextLine): #copy
self.line_buffer = txtstr.line_buffer[:]
if point is None:
self.point = txtstr.point
else:
self.point = point
if mark is None:
self.mark = txtstr.mark
else:
self.mark = mark
else:
self._insert_text(txtstr)
if point is None:
self.point = 0
else:
self.point = point
if mark is None:
self.mark = -1
else:
self.mark = mark
self.is_word_token = wordmatcher.is_word_token
self.next_start_segment = wordmatcher.next_start_segment
self.next_end_segment = wordmatcher.next_end_segment
self.prev_start_segment = wordmatcher.prev_start_segment
self.prev_end_segment = wordmatcher.prev_end_segment
def push_undo(self):
ltext = self.get_line_text()
if self.undo_stack and ltext == self.undo_stack[-1].get_line_text():
self.undo_stack[-1].point = self.point
else:
self.undo_stack.append(self.copy())
def pop_undo(self):
if len(self.undo_stack) >= 2:
self.undo_stack.pop()
self.set_top_undo()
self.undo_stack.pop()
else:
self.reset_line()
self.undo_stack = []
def set_top_undo(self):
if self.undo_stack:
undo = self.undo_stack[-1]
self.line_buffer = undo.line_buffer
self.point = undo.point
self.mark = undo.mark
else:
pass
def __repr__(self):
return u'TextLine("%s",point=%s,mark=%s)'%(self.line_buffer, self.point, self.mark)
def copy(self):
return self.__class__(self)
def set_point(self,value):
if isinstance(value, LinePositioner):
value = value(self)
assert (value <= len(self.line_buffer))
if value > len(self.line_buffer):
value = len(self.line_buffer)
self._point = value
def get_point(self):
return self._point
point = property(get_point, set_point)
def visible_line_width(self, position = Point):
"""Return the visible width of the text in line buffer up to position."""
extra_char_width = len([ None for c in self[:position].line_buffer if 0x2013 <= ord(c) <= 0xFFFD])
return len(self[:position].quoted_text()) + self[:position].line_buffer.count(u"\t")*7 + extra_char_width
def quoted_text(self):
quoted = [ quote_char(c) for c in self.line_buffer ]
self.line_char_width = [ len(c) for c in quoted ]
return u''.join(map(ensure_unicode, quoted))
def get_line_text(self):
buf = self.line_buffer
buf = map(ensure_unicode, buf)
return u''.join(buf)
def set_line(self, text, cursor = None):
self.line_buffer = [ c for c in str(text) ]
if cursor is None:
self.point = len(self.line_buffer)
else:
self.point = cursor
def reset_line(self):
self.line_buffer = []
self.point = 0
def end_of_line(self):
self.point = len(self.line_buffer)
def _insert_text(self, text, argument=1):
text = text * argument
if self.overwrite:
for c in text:
#if self.point:
self.line_buffer[self.point] = c
self.point += 1
else:
for c in text:
self.line_buffer.insert(self.point, c)
self.point += 1
def __getitem__(self, key):
#Check if key is LineSlice, convert to regular slice
#and continue processing
if isinstance(key, LineSlice):
key = key(self)
if isinstance(key, slice):
if key.step is None:
pass
else:
                raise IndexError(u"Cannot use step in line buffer indexing")
if key.start is None:
start = StartOfLine(self)
elif isinstance(key.start,LinePositioner):
start = key.start(self)
else:
start = key.start
if key.stop is None:
stop = EndOfLine(self)
elif isinstance(key.stop, LinePositioner):
stop = key.stop(self)
else:
stop = key.stop
return self.__class__(self.line_buffer[start:stop], point=0)
elif isinstance(key, LinePositioner):
return self.line_buffer[key(self)]
elif isinstance(key, tuple):
raise IndexError(u"Cannot use step in line buffer indexing") #Multiple slice not allowed
else:
# return TextLine(self.line_buffer[key])
return self.line_buffer[key]
def __delitem__(self, key):
point = self.point
if isinstance(key, LineSlice):
key = key(self)
if isinstance(key, slice):
start = key.start
stop = key.stop
if isinstance(start, LinePositioner):
start = start(self)
elif start is None:
start=0
if isinstance(stop, LinePositioner):
stop = stop(self)
elif stop is None:
stop = EndOfLine(self)
elif isinstance(key, LinePositioner):
start = key(self)
stop = start + 1
else:
start = key
stop = key + 1
prev = self.line_buffer[:start]
rest = self.line_buffer[stop:]
self.line_buffer = prev + rest
if point > stop:
self.point = point - (stop - start)
elif point >= start and point <= stop:
self.point = start
def __setitem__(self, key, value):
if isinstance(key, LineSlice):
key = key(self)
if isinstance(key, slice):
start = key.start
stop = key.stop
elif isinstance(key, LinePositioner):
start = key(self)
stop = start + 1
else:
start = key
stop = key + 1
prev = self.line_buffer[:start]
value = self.__class__(value).line_buffer
rest = self.line_buffer[stop:]
out = prev + value + rest
if len(out) >= len(self):
self.point = len(self)
self.line_buffer = out
def __len__(self):
return len(self.line_buffer)
def upper(self):
self.line_buffer = [x.upper() for x in self.line_buffer]
return self
def lower(self):
self.line_buffer = [x.lower() for x in self.line_buffer]
return self
def capitalize(self):
self.set_line(self.get_line_text().capitalize(), self.point)
return self
def startswith(self, txt):
return self.get_line_text().startswith(txt)
def endswith(self, txt):
return self.get_line_text().endswith(txt)
def __contains__(self, txt):
return txt in self.get_line_text()
lines = [TextLine(u"abc"),
TextLine(u"abc def"),
TextLine(u"abc def ghi"),
TextLine(u" abc def "),
]
l = lines[2]
l.point = 5
class ReadLineTextBuffer(TextLine):
def __init__(self,txtstr, point = None, mark = None):
super(ReadLineTextBuffer, self).__init__(txtstr, point, mark)
self.enable_win32_clipboard = True
self.selection_mark = -1
self.enable_selection = True
self.kill_ring = []
def __repr__(self):
return u'ReadLineTextBuffer'\
u'("%s",point=%s,mark=%s,selection_mark=%s)'%\
(self.line_buffer, self.point, self.mark,self.selection_mark)
def insert_text(self, char, argument=1):
self.delete_selection()
self.selection_mark = -1
self._insert_text(char, argument)
def to_clipboard(self):
if self.enable_win32_clipboard:
clipboard.set_clipboard_text(self.get_line_text())
######### Movement
def beginning_of_line(self):
self.selection_mark = -1
self.point = StartOfLine
def end_of_line(self):
self.selection_mark = -1
self.point = EndOfLine
def forward_char(self,argument = 1):
if argument < 0:
self.backward_char(-argument)
self.selection_mark = -1
for x in range(argument):
self.point = NextChar
def backward_char(self, argument=1):
if argument < 0:
self.forward_char(-argument)
self.selection_mark = -1
for x in range(argument):
self.point = PrevChar
def forward_word(self,argument=1):
if argument<0:
self.backward_word(-argument)
self.selection_mark=-1
for x in range(argument):
self.point = NextWordStart
def backward_word(self, argument=1):
if argument < 0:
self.forward_word(-argument)
self.selection_mark = -1
for x in range(argument):
self.point = PrevWordStart
def forward_word_end(self, argument=1):
if argument < 0:
self.backward_word_end(-argument)
self.selection_mark = -1
for x in range(argument):
self.point = NextWordEnd
def backward_word_end(self, argument=1):
if argument < 0:
self.forward_word_end(-argument)
self.selection_mark = -1
for x in range(argument):
self.point = NextWordEnd
######### Movement select
def beginning_of_line_extend_selection(self):
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
self.point = StartOfLine
def end_of_line_extend_selection(self):
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
self.point = EndOfLine
def forward_char_extend_selection(self,argument=1):
if argument < 0:
self.backward_char_extend_selection(-argument)
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
for x in range(argument):
self.point = NextChar
def backward_char_extend_selection(self, argument=1):
if argument < 0:
self.forward_char_extend_selection(-argument)
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
for x in range(argument):
self.point = PrevChar
def forward_word_extend_selection(self, argument=1):
if argument < 0:
self.backward_word_extend_selection(-argument)
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
for x in range(argument):
self.point = NextWordStart
def backward_word_extend_selection(self, argument=1):
if argument < 0:
self.forward_word_extend_selection(-argument)
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
for x in range(argument):
self.point = PrevWordStart
def forward_word_end_extend_selection(self, argument=1):
if argument < 0:
self.backward_word_end_extend_selection(-argument)
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
for x in range(argument):
self.point = NextWordEnd
def backward_word_end_extend_selection(self, argument=1):
if argument < 0:
self.forward_word_end_extend_selection(-argument)
if self.enable_selection and self.selection_mark < 0:
self.selection_mark = self.point
for x in range(argument):
self.point = PrevWordEnd
######### delete
def delete_selection(self):
if self.enable_selection and self.selection_mark >= 0:
if self.selection_mark < self.point:
del self[self.selection_mark:self.point]
self.selection_mark = -1
else:
del self[self.point:self.selection_mark]
self.selection_mark = -1
return True
else:
self.selection_mark = -1
return False
def delete_char(self, argument=1):
if argument < 0:
self.backward_delete_char(-argument)
if self.delete_selection():
argument -= 1
for x in range(argument):
del self[Point]
def backward_delete_char(self, argument=1):
if argument < 0:
self.delete_char(-argument)
if self.delete_selection():
argument -= 1
for x in range(argument):
if self.point > 0:
self.backward_char()
self.delete_char()
def forward_delete_word(self, argument=1):
if argument < 0:
self.backward_delete_word(-argument)
if self.delete_selection():
argument -= 1
for x in range(argument):
del self[Point:NextWordStart]
def backward_delete_word(self, argument=1):
if argument < 0:
self.forward_delete_word(-argument)
if self.delete_selection():
argument -= 1
for x in range(argument):
del self[PrevWordStart:Point]
def delete_current_word(self):
if not self.delete_selection():
del self[CurrentWord]
self.selection_mark =- 1
def delete_horizontal_space(self):
if self[Point] in " \t":
del self[PrevWordEnd:NextWordStart]
self.selection_mark = -1
######### Case
def upcase_word(self):
p = self.point
try:
self[CurrentWord] = self[CurrentWord].upper()
self.point = p
except NotAWordError:
pass
def downcase_word(self):
p = self.point
try:
self[CurrentWord] = self[CurrentWord].lower()
self.point = p
except NotAWordError:
pass
def capitalize_word(self):
p = self.point
try:
self[CurrentWord] = self[CurrentWord].capitalize()
self.point = p
except NotAWordError:
pass
########### Transpose
def transpose_chars(self):
p2 = Point(self)
if p2 == 0:
return
elif p2 == len(self):
p2 = p2 - 1
p1 = p2 - 1
self[p2], self[p1] = self[p1], self[p2]
self.point = p2 + 1
def transpose_words(self):
word1 = TextLine(self)
word2 = TextLine(self)
if self.point == len(self):
word2.point = PrevWordStart
word1.point = PrevWordStart(word2)
else:
word1.point = PrevWordStart
word2.point = NextWordStart
stop1 = NextWordEnd(word1)
stop2 = NextWordEnd(word2)
start1 = word1.point
start2 = word2.point
self[start2:stop2] = word1[Point:NextWordEnd]
self[start1:stop1] = word2[Point:NextWordEnd]
self.point = stop2
############ Kill
def kill_line(self):
self.add_to_kill_ring(self[self.point:])
del self.line_buffer[self.point:]
def kill_whole_line(self):
self.add_to_kill_ring(self[:])
del self[:]
def backward_kill_line(self):
del self[StartOfLine:Point]
def unix_line_discard(self):
del self[StartOfLine:Point]
pass
def kill_word(self):
"""Kills to next word ending"""
del self[Point:NextWordEnd]
def backward_kill_word(self):
"""Kills to next word ending"""
if not self.delete_selection():
del self[PrevWordStart:Point]
self.selection_mark = -1
def forward_kill_word(self):
"""Kills to next word ending"""
if not self.delete_selection():
del self[Point:NextWordEnd]
self.selection_mark = -1
def unix_word_rubout(self):
if not self.delete_selection():
del self[PrevSpace:Point]
self.selection_mark = -1
def kill_region(self):
pass
def copy_region_as_kill(self):
pass
def copy_backward_word(self):
pass
def copy_forward_word(self):
pass
def yank(self):
self.paste_from_kill_ring()
def yank_pop(self):
pass
############## Mark
def set_mark(self):
self.mark = self.point
def exchange_point_and_mark(self):
pass
def copy_region_to_clipboard(self): # ()
u'''Copy the text in the region to the windows clipboard.'''
if self.enable_win32_clipboard:
mark = min(self.mark, len(self.line_buffer))
cursor = min(self.point, len(self.line_buffer))
if self.mark == -1:
return
begin = min(cursor, mark)
end = max(cursor, mark)
toclipboard = u"".join(self.line_buffer[begin:end])
clipboard.SetClipboardText(toclipboard)
def copy_selection_to_clipboard(self): # ()
u'''Copy the text in the region to the windows clipboard.'''
if self.enable_win32_clipboard and self.enable_selection and self.selection_mark >= 0:
selection_mark = min(self.selection_mark,len(self.line_buffer))
cursor = min(self.point,len(self.line_buffer))
if self.selection_mark == -1:
return
begin = min(cursor, selection_mark)
end = max(cursor, selection_mark)
toclipboard = u"".join(self.line_buffer[begin:end])
clipboard.SetClipboardText(toclipboard)
def cut_selection_to_clipboard(self): # ()
self.copy_selection_to_clipboard()
self.delete_selection()
############## Paste
############## Kill ring
def add_to_kill_ring(self,txt):
self.kill_ring = [txt]
if kill_ring_to_clipboard:
clipboard.SetClipboardText(txt.get_line_text())
def paste_from_kill_ring(self):
if self.kill_ring:
self.insert_text(self.kill_ring[0])
##################################################################
q = ReadLineTextBuffer(u"asff asFArw ewrWErhg", point=8)
q = TextLine(u"asff asFArw ewrWErhg", point=8)
def show_pos(buff, pos, chr = u"."):
l = len(buff.line_buffer)
def choice(bool):
if bool:
return chr
else:
return u" "
return u"".join([choice(pos==idx) for idx in range(l + 1)])
def test_positioner(buff, points, positioner):
print (u" %s "%positioner.__class__.__name__).center(40, u"-")
buffstr = buff.line_buffer
print u'"%s"'%(buffstr)
for point in points:
b = TextLine(buff, point = point)
out=[u" "] * (len(buffstr) + 1)
pos = positioner(b)
if pos == point:
out[pos] = u"&"
else:
out[point] = u"."
out[pos] = u"^"
print u'"%s"'%(u"".join(out))
if __name__ == "__main__":
print u'%-15s "%s"'%(u"Position", q.get_line_text())
print u'%-15s "%s"'%(u"Point", show_pos(q, q.point))
for name, positioner in all_positioners:
pos = positioner(q)
[]
print u'%-15s "%s"'%(name, show_pos(q, pos, u"^"))
l = ReadLineTextBuffer(u"kjjk asads asad")
l.point = EndOfLine
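    # Editor's usage sketch (assumes the same Python 2 environment as the demo
    # lines above): positioner singletons double as slice bounds, so word-wise
    # editing reduces to slice extraction and deletion.
    buf = ReadLineTextBuffer(u"hello brave new world", point=8)
    print u'%-15s "%s"' % (u"CurrentWord", buf[CurrentWord].get_line_text())
    buf.backward_kill_word()  # delete from previous word start up to point
    print u'%-15s "%s"' % (u"after kill", buf.get_line_text())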
| en | 0.354081 | 2.459875 | 2 |
config.py | golnazads/export_service | 4 | 6632957 | <reponame>golnazads/export_service
# must be here for adsmutils to override it using env vars
# but if left empty (resolving to False) it won't be used
SERVICE_TOKEN = None
# configuration for accessing solr db
# these values can be overwritten by local_config values
# maximum number of records that can be fetched by bigquery is for now 2000
# this can be overwritten to become smaller but it cannot become larger
# cutoff to use query vs bigquery is 100, anything equal and lower calls query, otherwise bigquery is called
EXPORT_SOLR_BIGQUERY_URL = "https://api.adsabs.harvard.edu/v1/search/bigquery"
EXPORT_SERVICE_MAX_RECORDS_SOLR_BIGQUERY = 2000
EXPORT_SOLR_QUERY_URL = "https://api.adsabs.harvard.edu/v1/search/query"
EXPORT_SERVICE_MAX_RECORDS_SOLR_QUERY = 100
# these are used for linkout links
EXPORT_SERVICE_FROM_BBB_URL = 'https://ui.adsabs.harvard.edu/abs'
EXPORT_SERVICE_RESOLVE_URL = "https://ui.adsabs.harvard.edu/link_gateway"
# added to the end of bibTex
EXPORT_SERVICE_ADS_NOTES = 'Provided by the SAO/NASA Astrophysics Data System'
# sort specified by user when they want the service to keep the same order they have specified
# going to be useful when used through the API
# not going to be implemented from the UI
EXPORT_SERVICE_NO_SORT_SOLR = 'no sort'
# Journal Abbreviations used in the ADS BibTeX entries
# From http://adsabs.harvard.edu/abs_doc/aas_macros.html
# Journal name TeX macro
EXPORT_SERVICE_AASTEX_JOURNAL_MACRO = [
['AJ', r'\aj'],
['ApJ', r'\apj'],
['AcA', r'\actaa'],
['ARA&A', r'\araa'],
['ApJL', r'\apjl'],
['ApJS', r'\apjs'],
['ApOpt', r'\ao'],
['Ap&SS', r'\apss'],
['A&A', r'\aap'],
['A&ARv', r'\aapr'],
['A&AS', r'\aaps'],
['AZh', r'\azh'],
['BAAS', r'\baas'],
['ChA&A', r'\caa'],
['ChJAA', r'\cjaa'],
['Icar', r'\icarus'],
['JCAP', r'\jcap'],
['JRASC', r'\jrasc'],
['MmRAS', r'\memras'],
['MNRAS', r'\mnras'],
['NewA', r'\na'],
['NewAR', r'\nar'],
['PhRvA', r'\pra'],
['PhRvB', r'\prb'],
['PhRvC', r'\prc'],
['PhRvD', r'\prd'],
['PhRvE', r'\pre'],
['PhRvL', r'\prl'],
['PASA', r'\pasa'],
['PASP', r'\pasp'],
['PASJ', r'\pasj'],
['RMxAA', r'\rmxaa'],
['QJRAS', r'\qjras'],
['S&T', r'\skytel'],
['SoPh', r'\solphys'],
['SvA', r'\sovast'],
['SSRv', r'\ssr'],
['ZA', r'\zap'],
['Natur', r'\nat'],
['IAUC', r'\iaucirc'],
['ApL', r'\aplett'],
['ASPRv', r'\apspr'],
['BAN', r'\bain'],
['FCPh', r'\fcp'],
['GeCoA', r'\gca'],
['GeoRL', r'\grl'],
['JChPh', r'\jcp'],
['JGR', r'\jgr'],
['JQSRT', r'\jqsrt'],
['MmSAI', r'\memsai'],
['NuPhA', r'\nphysa'],
['PhR', r'\physrep'],
['PhyS', r'\physscr'],
['P&SS', r'\planss'],
['SPIE', r'\procspie'],
['JAVSO', r'\jaavso'],
['PSJ', r'\psj'],
['M&PS', r'\maps'],
['AAS', r'\aas'],
['DPS', r'\dps'],
]
# For SoPh format:
# First element is the journal abbreviation to be output,
# second one is the bibstem to which it applies.
EXPORT_SERVICE_SOPH_JOURNAL_ABBREVIATION = {
'A&A..': 'Astron. Astroph.',
'ApJ..': 'Astrophys. J.',
'SoPh.': 'Solar Phys.',
'GeoRL': 'Geophys. Res. Lett.',
'JGRA.': 'J.Geophys. Res. A',
'JGRB.': 'J.Geophys. Res. B',
'JGRC.': 'J.Geophys. Res. C',
'JGRD.': 'J.Geophys. Res. D',
'JGRE.': 'J.Geophys. Res. E',
}
# Testing Bibcode for GET
EXPORT_SERVICE_TEST_BIBCODE_GET = 'TEST..BIBCODE..GET.'
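# Editor's illustrative sketch, not part of the deployed configuration: one way
# a caller could pick between the two search endpoints using the cutoff defined
# above. The helper name is hypothetical.
def _choose_solr_endpoint(num_records):
    """Use the plain query endpoint for small requests, bigquery otherwise."""
    if num_records <= EXPORT_SERVICE_MAX_RECORDS_SOLR_QUERY:
        return EXPORT_SOLR_QUERY_URL
    return EXPORT_SOLR_BIGQUERY_URL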
| en | 0.869372 | 1.784021 | 2 |
src/utils.py | anoir2/amazon-braket-community-detection | 5 | 6632958 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import shutil
from urllib.request import urlopen
from zipfile import ZipFile
from io import BytesIO
def download_graphs(graph_url, data_dir = "./graph_data"):
"""
Download graph .zip files from web URL
:param graph_url: dict, with a format of {'graph_name': 'url'}
:param data_dir: str, the directory path to store graph data
"""
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print("Created ./graph_data directory in local machine to store graph data.")
for graph_name in graph_url.keys():
url = graph_url[graph_name]
with urlopen(url) as zr:
with ZipFile(BytesIO(zr.read())) as zf:
zf.extractall(data_dir)
def clean_graph_data(graph_files, data_dir = "./graph_data"):
"""
Clean graph data by removing header lines
:param graph_files: dict, with a format of {'graph_name': {'file': str, 'lines_to_skip': int}}
:param data_dir: str, the directory path to graph data
"""
for graph_name in graph_files.keys():
# create a subfolder for each graph and save its file with header lines removed
graph_folder = os.path.join(data_dir, graph_name)
if not os.path.exists(graph_folder):
os.makedirs(graph_folder)
raw_file = os.path.join(data_dir, graph_files[graph_name]['file'])
new_file = os.path.join(graph_folder, graph_files[graph_name]['file'])
with open(raw_file, 'r') as f_raw:
data = f_raw.read().splitlines(True)
with open(new_file, 'w') as f_new:
f_new.writelines(data[graph_files[graph_name]['lines_to_skip']:]) | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
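if __name__ == "__main__":
    # Editor's usage sketch: the URL, file name and header count below are
    # hypothetical placeholders, not datasets shipped with this repository.
    example_urls = {"example_graph": "https://example.com/example_graph.zip"}
    example_files = {"example_graph": {"file": "example_graph.txt", "lines_to_skip": 4}}
    download_graphs(example_urls)    # fetches and unzips into ./graph_data (needs a real URL)
    clean_graph_data(example_files)  # strips the header lines and writes per-graph subfolders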
# SPDX-License-Identifier: MIT-0
import os
import shutil
from urllib.request import urlopen
from zipfile import ZipFile
from io import BytesIO
def download_graphs(graph_url, data_dir = "./graph_data"):
"""
Download graph .zip files from web URL
:param graph_url: dict, with a format of {'graph_name': 'url'}
:param data_dir: str, the directory path to store graph data
"""
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print("Created ./graph_data directory in local machine to store graph data.")
for graph_name in graph_url.keys():
url = graph_url[graph_name]
with urlopen(url) as zr:
with ZipFile(BytesIO(zr.read())) as zf:
zf.extractall(data_dir)
def clean_graph_data(graph_files, data_dir = "./graph_data"):
"""
Clean graph data by removing header lines
:param graph_files: dict, with a format of {'graph_name': {'file': str, 'lines_to_skip': int}}
:param data_dir: str, the directory path to graph data
"""
for graph_name in graph_files.keys():
# create a subfolder for each graph and save its file with header lines removed
graph_folder = os.path.join(data_dir, graph_name)
if not os.path.exists(graph_folder):
os.makedirs(graph_folder)
raw_file = os.path.join(data_dir, graph_files[graph_name]['file'])
new_file = os.path.join(graph_folder, graph_files[graph_name]['file'])
with open(raw_file, 'r') as f_raw:
data = f_raw.read().splitlines(True)
with open(new_file, 'w') as f_new:
f_new.writelines(data[graph_files[graph_name]['lines_to_skip']:]) | en | 0.744812 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 Download graph .zip files from web URL :param graph_url: dict, with a format of {'graph_name': 'url'} :param data_dir: str, the directory path to store graph data Clean graph data by removing header lines :param graph_files: dict, with a format of {'graph_name': {'file': str, 'lines_to_skip': int}} :param data_dir: str, the directory path to graph data # create a subfolder for each graph and save its file with header lines removed | 3.34155 | 3 |
src/leetcodepython/tree/construct_string_binary_tree_606.py | zhangyu345293721/leetcode | 90 | 6632959 | <filename>src/leetcodepython/tree/construct_string_binary_tree_606.py
'''
/**
 * This is the solution of problem No. 606 in the book <i>Coding Interviews: Questions, Analysis & Solutions</i>;
 * the website of the problem is as follows:
 * https://leetcode-cn.com/problems/construct-string-from-binary-tree
 * The description of the problem is as follows:
 * ==========================================================================================================
 * Using preorder traversal, convert a binary tree into a string made up of parentheses and integers.
 *
 * An empty node is represented by a pair of empty parentheses "()". You must omit all empty parenthesis
 * pairs that do not affect the one-to-one mapping between the string and the original binary tree.
 *
 * Example 1:
 *
 * Input: binary tree: [1,2,3,4]
 *        1
 *      /   \
 *     2     3
 *    /
 *   4
 *
 * Output: "1(2(4))(3)"
 *
 * Explanation: Originally it would be "1(2(4)())(3())";
 * after omitting all unnecessary empty parenthesis pairs,
 * it becomes "1(2(4))(3)".
 * Example 2:
 *
 * Input: binary tree: [1,2,3,null,4]
 *        1
 *      /   \
 *     2     3
 *      \
 *       4
 *
 * Output: "1(2()(4))(3)"
 *
 * Explanation: Similar to the first example,
 * except that we cannot omit the first pair of parentheses, since that would break
 * the one-to-one mapping between input and output.
 *
 * Source: LeetCode (leetcode-cn.com)
 * Link: https://leetcode-cn.com/problems/construct-string-from-binary-tree
 * Copyright belongs to LeetCode; for commercial reuse please contact them for official
 * authorization, and for non-commercial reuse please cite the source.
 * ==========================================================================================================
 *
 * @author zhangyu (<EMAIL>)
 */
'''
from tree.tree_node import TreeNode
class Solution:
def tree_2_str(self, root: TreeNode) -> str:
'''
        Convert a binary tree to its string representation using preorder traversal.
        Args:
            root: root node of the binary tree
        Returns:
            the encoded string
'''
        if root is None:
return ''
left = str(self.tree_2_str(root.left))
right = str(self.tree_2_str(root.right))
left = '' if len(left) < 1 and len(right) < 1 else '(' + str(left) + ')'
right = '' if len(right) < 1 else '(' + str(right) + ')'
return str(root.val) + left + right
if __name__ == '__main__':
nums = [1, 2, 3, 4]
root = TreeNode.create_binary_tree_array(nums)
solution = Solution()
result = solution.tree_2_str(root)
print(result)
assert result == '1(2(4))(3)'
| zh | 0.587825 | 3.252639 | 3 |
coffin/contrib/loader.py | kazmiruk/coffin | 1 | 6632960 | # -*- coding: utf-8 -*-
"""
A Django template loader wrapper for Coffin that intercepts
requests for "*.jinja" templates, rendering them with Coffin
instead of Django templates.
Usage:
TEMPLATE_LOADERS = (
'coffin.contrib.loader.AppLoader',
'coffin.contrib.loader.FileSystemLoader',
)
"""
from os.path import splitext
from coffin.common import env
from django.conf import settings
from django.template.loaders import app_directories, filesystem
JINJA2_DEFAULT_TEMPLATE_EXTENSION = getattr(settings,
'JINJA2_DEFAULT_TEMPLATE_EXTENSION', ('.jinja',))
if isinstance(JINJA2_DEFAULT_TEMPLATE_EXTENSION, basestring):
JINJA2_DEFAULT_TEMPLATE_EXTENSION = (JINJA2_DEFAULT_TEMPLATE_EXTENSION,)
class LoaderMixin(object):
is_usable = True
def load_template(self, template_name, template_dirs=None):
extension = splitext(template_name)[1]
if not extension in JINJA2_DEFAULT_TEMPLATE_EXTENSION:
return super(LoaderMixin, self).load_template(template_name,
template_dirs)
template = env.get_template(template_name)
return template, template.filename
class FileSystemLoader(LoaderMixin, filesystem.Loader):
pass
class AppLoader(LoaderMixin, app_directories.Loader):
pass
| en | 0.328094 | 2.483237 | 2 |
src/ai.py | linhusp/gomoku-alphabeta | 4 | 6632961 | <gh_stars>1-10
import piece
import numpy as np
from eval_fn import evaluation_state
def get_best_move(state, depth, is_max_state):
values = state.values
best_value = is_max_state and -9999 or 9999
best_move = (-1, -1)
pieces = len(values[values != piece.EMPTY])
if pieces == 0:
return first_move(state)
if pieces == 1:
return second_move(state)
top_moves = get_top_moves(state, 10, is_max_state)
for move_n_value in top_moves:
move = move_n_value[0]
value = minimax(state.next(move),
-10e5,
10e5,
depth - 1,
not is_max_state)
if ((is_max_state and value > best_value)
or (not is_max_state and value < best_value)):
best_value = value
best_move = move
# print(best_move, best_value)
if best_move[0] == -1 and best_move[1] == -1:
return top_moves[0]
return best_move, best_value
def get_top_moves(state, n, is_max_state):
color = state.color
top_moves = []
for move in state.legal_moves():
evaluation = evaluation_state(state.next(move), color)
top_moves.append((move, evaluation))
return sorted(top_moves, key=lambda x: x[1], reverse=is_max_state)[:n]
def minimax(state, alpha, beta, depth, is_max_state):
if depth == 0 or state.is_terminal():
return evaluation_state(state, -state.color)
if is_max_state:
value = -9999
for move in state.legal_moves():
value = max(
value,
minimax(state.next(move), alpha, beta, depth - 1, False)
)
alpha = max(value, alpha)
if alpha >= beta:
break
return value
else:
value = 9999
for move in state.legal_moves():
value = min(
value,
minimax(state.next(move), alpha, beta, depth - 1, True)
)
beta = min(value, beta)
if alpha >= beta:
break
return value
def first_move(state):
x = state.size // 2
return np.random.choice((x - 1, x, x + 1), 2), 1
def second_move(state):
i, j = state.last_move
size = state.size
i2 = i <= size // 2 and 1 or -1
j2 = j <= size // 2 and 1 or -1
return (i + i2, j + j2), 2
return (i + i2, j + j2), 2 | my | 0.060438 | # print(best_move, best_value) | 3.129751 | 3 |
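A standalone illustration of the alpha-beta pruning pattern used by minimax above, run on a tiny hand-built game tree rather than a gomoku state; the toy function below is not part of the module.
def toy_minimax(node, alpha, beta, is_max):
    if isinstance(node, (int, float)):      # leaf: static evaluation
        return node
    if is_max:
        value = -9999
        for child in node:
            value = max(value, toy_minimax(child, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:               # beta cut-off
                break
        return value
    value = 9999
    for child in node:
        value = min(value, toy_minimax(child, alpha, beta, True))
        beta = min(beta, value)
        if alpha >= beta:                   # alpha cut-off
            break
    return value

print(toy_minimax([[3, 5], [2, 9]], -9999, 9999, True))  # 3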
spec/behavior_pyspec_localization.py | jyotijaya/pyspec | 1 | 6632962 |
# -*- coding: utf-8 -*-
import sys, os
parent_path = os.path.split(os.path.abspath("."))[0]
if parent_path not in sys.path:
sys.path.insert(0, parent_path)
from pyspec import *
from pyspec.mockobject import *
import pyspec.framework
import pyspec.embedded.setting as setting
class Behavior_Setting_for_Localization(object):
@context(group=1)
def a_default_config(self):
self.config = setting.PySpecConfig()
@spec(group=1)
def should_have_english_locale_as_default(self):
About(self.config.language.code).should_equal('en')
@spec(group=1)
def should_have_supported_language(self):
About(self.config.language.support).should_include('en')
About(self.config.language.support).should_include('ja')
@context(group=2)
def a_config_that_was_set_valid_language(self):
self.config = setting.PySpecConfig()
self.config.language.set_language('ja')
@spec(group=2)
def can_accept_it(self):
About(self.config.language.code).should_equal('ja')
@context(group=3)
def a_config_that_was_set_invalid_language(self):
self.config = setting.PySpecConfig()
# pyspec can't accept tlhIngan Hol!
self.config.language.set_language('tlh')
@spec(group=3)
def should_not_change_language(self):
About(self.config.language.code).should_equal('en')
@context(group=4)
def should_equal_fail_message_in_english(self):
config = setting.PySpecConfig()
self.english_message = config.language.get('should_equal',
'fail', variable_name='age',
expected_value='27',
actual_value='29')
@spec(group=4)
def pyspec_can_generate_it(self):
About(self.english_message).should_equal('age should equal 27, but was 29.')
if __name__ == "__main__":
run_test()
run_test() | en | 0.795189 | # -*- coding: utf-8 -*- # pyspec can't accept tlhIngan Hol! | 2.169847 | 2 |
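A plain-assert reading of the group 4 behaviour above, kept as comments because it depends on the pyspec setting module being importable.
# config = setting.PySpecConfig()
# msg = config.language.get('should_equal', 'fail', variable_name='age',
#                           expected_value='27', actual_value='29')
# assert msg == 'age should equal 27, but was 29.'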
heekscnc/nc/hpgl3d_read.py | JohnyEngine/CNC | 0 | 6632963 |
import num_reader
import sys
import math
class Parser(num_reader.NumReader):
def __init__(self, writer):
num_reader.NumReader.__init__(self, writer)
self.x = 0
self.y = 0
self.z = 10000
self.f = 0
self.units_to_mm = 0.01
def ParseV(self):
self.line_index = self.line_index + 1
f = self.get_number()
if len(f) > 0:
self.f = float(f)
self.add_word("prep")
def ParseZ(self):
self.line_index = self.line_index + 1
x = self.get_number()
if len(x) > 0:
y = self.get_number()
if len(y) > 0:
z = self.get_number()
if len(z) > 0:
if self.f > 40: color = "rapid"
else: color = "feed"
self.add_word(color)
self.writer.begin_path(color)
self.writer.add_line(int(x) * self.units_to_mm, int(y) * self.units_to_mm, int(z) * self.units_to_mm)
self.writer.end_path()
self.x = int(x)
self.y = int(y)
self.z = int(z)
def ParseFromFirstLetter(self, c):
if c == 'Z':
self.ParseZ()
elif c == 'V':
self.ParseV()
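A short reading of the two record types handled by the parser above; the interpretation is inferred from this file only, and the device syntax itself may differ.
# 'V' records: the number that follows is a feed rate, stored in self.f and
#              emitted as a "prep" word.
# 'Z' records: three numbers (x, y, z) in 0.01 mm units are converted to mm
#              and written as one line of a "rapid" path (f > 40) or a
#              "feed" path (otherwise).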
| none | 1 | 2.953043 | 3 |
|
mdpo/io.py | dingyifei/mdpo | 8 | 6632964 | """mdpo I/O utilities."""
import glob
import hashlib
import os
import re
def filter_paths(filepaths, ignore_paths=[]):
"""Filters a list of paths removing those defined in other list of paths.
The paths to filter can be defined in the list of paths to ignore in
several forms:
- The same string.
- Only the file name.
- Only their direct directory name.
- Their direct directory full path.
Args:
filepaths (list): Set of source paths to filter.
ignore_paths (list): Paths that must not be included in the response.
Returns:
list: Non filtered paths ordered alphabetically.
"""
response = []
for filepath in filepaths:
# ignore by filename
if os.path.basename(filepath) in ignore_paths:
continue
# ignore by dirname
if os.path.basename(os.path.dirname(filepath)) in ignore_paths:
continue
# ignore by filepath
if filepath in ignore_paths:
continue
# ignore by dirpath (relative or absolute)
if (os.sep).join(filepath.split(os.sep)[:-1]) in ignore_paths:
continue
response.append(filepath)
response.sort()
return response
def to_file_content_if_is_file(value, encoding='utf-8'):
"""Check if the value passed is a file path or string content.
If it is a file, reads its content and returns it; otherwise returns
the string passed as is.
Args:
value (str): Value to check if is a filepath or content.
encoding (str): Expected file encoding, if is a file.
Returns:
str: File content if ``value`` is an existing file or ``value`` as is.
"""
if os.path.isfile(value):
with open(value, encoding=encoding) as f:
value = f.read()
return value
def to_glob_or_content(value):
"""Check if the value passed is a glob or is string content.
Args:
value (str): Value to check if is a glob or content.
Returns:
tuple: Two values: the first is a boolean indicating whether ``value``
is a glob (``True``) or content (``False``), and the second is
the content (parsed as a glob if the first value is ``True``).
"""
try:
parsed = glob.glob(value)
except re.error:
# some strings like '[s-m]' will produce
# 're.error: bad character range ... at position'
return (False, value)
if not parsed:
# assumes it is content
return (False, value)
return (True, parsed)
def filehash(filepath):
"""Compute the hash of a file.
Args:
filepath (str): Path to the file.
"""
hasher = hashlib.md5()
with open(filepath, 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
def save_file_checking_file_changed(filepath, content, encoding='utf-8'):
"""Save a file checking if the content has changed.
Args:
filepath (str): Path to the file to write.
content (str): Content to write into the file.
encoding (str): Encoding used when writing the file.
Returns:
bool: Whether the file content has changed.
"""
if not os.path.isfile(filepath):
with open(filepath, 'w', encoding=encoding) as f:
f.write(content)
return True
pre_hash = filehash(filepath)
with open(filepath, 'w', encoding=encoding) as f:
f.write(content)
post_hash = filehash(filepath)
return pre_hash != post_hash
| """mdpo I/O utilities."""
import glob
import hashlib
import os
import re
def filter_paths(filepaths, ignore_paths=[]):
"""Filters a list of paths removing those defined in other list of paths.
The paths to filter can be defined in the list of paths to ignore in
several forms:
- The same string.
- Only the file name.
- Only their direct directory name.
- Their direct directory full path.
Args:
filepaths (list): Set of source paths to filter.
ignore_paths (list): Paths that must not be included in the response.
Returns:
list: Non filtered paths ordered alphabetically.
"""
response = []
for filepath in filepaths:
# ignore by filename
if os.path.basename(filepath) in ignore_paths:
continue
# ignore by dirname
if os.path.basename(os.path.dirname(filepath)) in ignore_paths:
continue
# ignore by filepath
if filepath in ignore_paths:
continue
# ignore by dirpath (relative or absolute)
if (os.sep).join(filepath.split(os.sep)[:-1]) in ignore_paths:
continue
response.append(filepath)
response.sort()
return response
def to_file_content_if_is_file(value, encoding='utf-8'):
"""Check if the value passed is a file path or string content.
If is a file, reads its content and returns it, otherwise returns
the string passed as is.
Args:
value (str): Value to check if is a filepath or content.
encoding (str): Expected file encoding, if is a file.
Returns:
str: File content if ``value`` is an existing file or ``value`` as is.
"""
if os.path.isfile(value):
with open(value, encoding=encoding) as f:
value = f.read()
return value
def to_glob_or_content(value):
"""Check if the value passed is a glob or is string content.
Args:
value (str): Value to check if is a glob or content.
Returns:
list: Two values being the first a boolean that indicates if ``value``
is a glob (``True``) or content (``False``) and the second value
is the content (parsed as glob is first value is ``True``).
"""
try:
parsed = glob.glob(value)
except re.error:
# some strings like '[s-m]' will produce
# 're.error: bad character range ... at position'
return (False, value)
if not parsed:
# assumes it is content
return (False, value)
return (True, parsed)
def filehash(filepath):
"""Compute the hash of a file.
Args:
filepath (str): Path to the file.
"""
hasher = hashlib.md5()
with open(filepath, 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
def save_file_checking_file_changed(filepath, content, encoding='utf-8'):
"""Save a file checking if the content has changed.
Args:
pofile (:py:class:`polib.POFile`): POFile to save.
po_filepath (str): Path to the new file to save in.
Returns:
bool: If the PO file content has been changed.
"""
if not os.path.isfile(filepath):
with open(filepath, 'w', encoding=encoding) as f:
f.write(content)
return True
pre_hash = filehash(filepath)
with open(filepath, 'w', encoding=encoding) as f:
f.write(content)
post_hash = filehash(filepath)
return pre_hash != post_hash
| en | 0.814168 | mdpo I/O utilities. Filters a list of paths removing those defined in other list of paths. The paths to filter can be defined in the list of paths to ignore in several forms: - The same string. - Only the file name. - Only their direct directory name. - Their direct directory full path. Args: filepaths (list): Set of source paths to filter. ignore_paths (list): Paths that must not be included in the response. Returns: list: Non filtered paths ordered alphabetically. # ignore by filename # ignore by dirname # ignore by filepath # ignore by dirpath (relative or absolute) Check if the value passed is a file path or string content. If is a file, reads its content and returns it, otherwise returns the string passed as is. Args: value (str): Value to check if is a filepath or content. encoding (str): Expected file encoding, if is a file. Returns: str: File content if ``value`` is an existing file or ``value`` as is. Check if the value passed is a glob or is string content. Args: value (str): Value to check if is a glob or content. Returns: list: Two values being the first a boolean that indicates if ``value`` is a glob (``True``) or content (``False``) and the second value is the content (parsed as glob is first value is ``True``). # some strings like '[s-m]' will produce # 're.error: bad character range ... at position' # assumes it is content Compute the hash of a file. Args: filepath (str): Path to the file. Save a file checking if the content has changed. Args: pofile (:py:class:`polib.POFile`): POFile to save. po_filepath (str): Path to the new file to save in. Returns: bool: If the PO file content has been changed. | 4.001352 | 4 |
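A small self-contained usage sketch of filter_paths above; the paths are made up and POSIX-style separators are assumed.
files = ['docs/index.md', 'docs/api.md', 'README.md', 'build/out.md']
# 'api.md' is dropped by file name, 'build/out.md' by its directory name.
print(filter_paths(files, ignore_paths=['api.md', 'build']))
# -> ['README.md', 'docs/index.md']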
pytorch_widedeep/models/tabular/transformers/tab_perceiver.py | TangleSpace/pytorch-widedeep | 0 | 6632965 | import torch
import einops
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tabular.mlp._layers import MLP
from pytorch_widedeep.models.tabular._base_tabular_model import (
BaseTabularModelWithAttention,
)
from pytorch_widedeep.models.tabular.transformers._encoders import (
PerceiverEncoder,
)
class TabPerceiver(BaseTabularModelWithAttention):
r"""Defines an adaptation of a `Perceiver model
<https://arxiv.org/abs/2103.03206>`_ that can be used as the
``deeptabular`` component of a Wide & Deep model or independently by
itself.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
{'education': 0, 'relationship': 1, 'workclass': 2, ...}
cat_embed_input: List, Optional, default = None
List of Tuples with the column name and number of unique values for
each categorical component e.g. [(education, 11), ...]
cat_embed_dropout: float, default = 0.1
Categorical embeddings dropout
use_cat_bias: bool, default = False,
Boolean indicating if bias will be used for the categorical embeddings
cat_embed_activation: Optional, str, default = None,
Activation function for the categorical embeddings, if any. `'tanh'`,
`'relu'`, `'leaky_relu'` and `'gelu'` are supported.
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation of
one column) will be dropped in the batch. See:
:obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
If ``full_embed_dropout = True``, ``cat_embed_dropout`` is ignored.
shared_embed: bool, default = False
The idea behind ``shared_embed`` is described in the Appendix A in the
`TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The
goal of having column embedding is to enable the model to distinguish
the classes in one column from those in the other columns'`. In other
words, the idea is to let the model learn which column is embedded
at the time.
add_shared_embed: bool, default = False,
The two embedding sharing strategies are: 1) add the shared embeddings
to the column embeddings or 2) to replace the first
``frac_shared_embed`` with the shared embeddings.
See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
frac_shared_embed: float, default = 0.25
The fraction of embeddings that will be shared (if ``add_shared_embed
= False``) by all the different categories for one particular
column.
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
cont_norm_layer: str, default = "batchnorm"
Type of normalization layer applied to the continuous features. Options
are: 'layernorm', 'batchnorm' or None.
cont_embed_dropout: float, default = 0.1,
Continuous embeddings dropout
use_cont_bias: bool, default = True,
Boolean indicating if bias will be used for the continuous embeddings
cont_embed_activation: str, default = None
Activation function to be applied to the continuous embeddings, if
any. `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported.
input_dim: int, default = 32
The so-called *dimension of the model*. Is the number of embeddings
used to encode the categorical and/or continuous columns.
n_cross_attns: int, default = 1
Number of times each perceiver block will cross attend to the input
data (i.e. number of cross attention components per perceiver block).
This should normally be 1. However, in the paper they describe some
architectures (normally computer vision-related problems) where the
Perceiver attends multiple times to the input array. Therefore, maybe
multiple cross attention to the input array is also useful in some
cases for tabular data
n_cross_attn_heads: int, default = 4
Number of attention heads for the cross attention component
n_latents: int, default = 16
Number of latents. This is the *N* parameter in the paper. As
indicated in the paper, this number should be significantly lower
than *M* (the number of columns in the dataset). Setting *N* closer
to *M* defies the main purpose of the Perceiver, which is to overcome
the transformer quadratic bottleneck
latent_dim: int, default = 128
Latent dimension.
n_latent_heads: int, default = 4
Number of attention heads per Latent Transformer
n_latent_blocks: int, default = 4
Number of transformer encoder blocks (normalised MHA + normalised FF)
per Latent Transformer
n_perceiver_blocks: int, default = 4
Number of Perceiver blocks defined as [Cross Attention + Latent
Transformer]
share_weights: Boolean, default = False
Boolean indicating if the weights will be shared between Perceiver
blocks
attn_dropout: float, default = 0.1
Dropout that will be applied to the Multi-Head Attention layers
ff_dropout: float, default = 0.1
Dropout that will be applied to the FeedForward network
transformer_activation: str, default = "geglu"
Transformer Encoder activation function. `'tanh'`, `'relu'`,
`'leaky_relu'`, `'gelu'`, `'geglu'` and `'reglu'` are supported
mlp_hidden_dims: List, Optional, default = None
MLP hidden dimensions. If not provided it will default to ``[l, 4*l,
2*l]`` where ``l`` is the MLP's input dimension
mlp_activation: str, default = "relu"
MLP activation function. `'tanh'`, `'relu'`, `'leaky_relu'` and
`'gelu'` are supported
mlp_dropout: float, default = 0.1
Dropout that will be applied to the final MLP
mlp_batchnorm: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers
mlp_linear_first: bool, default = True
Boolean indicating whether the order of the operations in the dense
layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->
LIN -> ACT]``
Attributes
----------
cat_and_cont_embed: ``nn.Module``
This is the module that processes the categorical and continuous columns
perceiver_blks: ``nn.ModuleDict``
ModuleDict with the Perceiver blocks
latents: ``nn.Parameter``
Latents that will be used for prediction
perceiver_mlp: ``nn.Module``
MLP component in the model
output_dim: int
The output dimension of the model. This is a required attribute
necessary to build the ``WideDeep`` class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import TabPerceiver
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
>>> continuous_cols = ['e']
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = TabPerceiver(column_idx=column_idx, cat_embed_input=cat_embed_input,
... continuous_cols=continuous_cols, n_latents=2, latent_dim=16,
... n_perceiver_blocks=2)
>>> out = model(X_tab)
"""
def __init__(
self,
column_idx: Dict[str, int],
cat_embed_input: Optional[List[Tuple[str, int]]] = None,
cat_embed_dropout: float = 0.1,
use_cat_bias: bool = False,
cat_embed_activation: Optional[str] = None,
full_embed_dropout: bool = False,
shared_embed: bool = False,
add_shared_embed: bool = False,
frac_shared_embed: float = 0.25,
continuous_cols: Optional[List[str]] = None,
cont_norm_layer: str = None,
cont_embed_dropout: float = 0.1,
use_cont_bias: bool = True,
cont_embed_activation: Optional[str] = None,
input_dim: int = 32,
n_cross_attns: int = 1,
n_cross_attn_heads: int = 4,
n_latents: int = 16,
latent_dim: int = 128,
n_latent_heads: int = 4,
n_latent_blocks: int = 4,
n_perceiver_blocks: int = 4,
share_weights: bool = False,
attn_dropout: float = 0.1,
ff_dropout: float = 0.1,
transformer_activation: str = "geglu",
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_dropout: float = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = True,
):
super(TabPerceiver, self).__init__(
column_idx=column_idx,
cat_embed_input=cat_embed_input,
cat_embed_dropout=cat_embed_dropout,
use_cat_bias=use_cat_bias,
cat_embed_activation=cat_embed_activation,
full_embed_dropout=full_embed_dropout,
shared_embed=shared_embed,
add_shared_embed=add_shared_embed,
frac_shared_embed=frac_shared_embed,
continuous_cols=continuous_cols,
cont_norm_layer=cont_norm_layer,
embed_continuous=True,
cont_embed_dropout=cont_embed_dropout,
use_cont_bias=use_cont_bias,
cont_embed_activation=cont_embed_activation,
input_dim=input_dim,
)
self.n_cross_attns = n_cross_attns
self.n_cross_attn_heads = n_cross_attn_heads
self.n_latents = n_latents
self.latent_dim = latent_dim
self.n_latent_heads = n_latent_heads
self.n_latent_blocks = n_latent_blocks
self.n_perceiver_blocks = n_perceiver_blocks
self.share_weights = share_weights
self.attn_dropout = attn_dropout
self.ff_dropout = ff_dropout
self.transformer_activation = transformer_activation
self.mlp_hidden_dims = mlp_hidden_dims
self.mlp_activation = mlp_activation
self.mlp_dropout = mlp_dropout
self.mlp_batchnorm = mlp_batchnorm
self.mlp_batchnorm_last = mlp_batchnorm_last
self.mlp_linear_first = mlp_linear_first
# Embeddings are instantiated at the base model
# Transformer blocks
self.latents = nn.init.trunc_normal_(
nn.Parameter(torch.empty(n_latents, latent_dim))
)
self.perceiver_blks = nn.ModuleDict()
first_perceiver_block = self._build_perceiver_block()
self.perceiver_blks["perceiver_block0"] = first_perceiver_block
if share_weights:
for n in range(1, n_perceiver_blocks):
self.perceiver_blks["perceiver_block" + str(n)] = first_perceiver_block
else:
for n in range(1, n_perceiver_blocks):
self.perceiver_blks[
"perceiver_block" + str(n)
] = self._build_perceiver_block()
# Mlp
if not mlp_hidden_dims:
self.mlp_hidden_dims = [latent_dim, latent_dim * 4, latent_dim * 2]
else:
self.mlp_hidden_dims = [latent_dim] + mlp_hidden_dims
self.perceiver_mlp = MLP(
self.mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
# the output_dim attribute will be used as input_dim when "merging" the models
self.output_dim: int = self.mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor:
x_emb = self._get_embeddings(X)
x = einops.repeat(self.latents, "n d -> b n d", b=X.shape[0])
for n in range(self.n_perceiver_blocks):
cross_attns = self.perceiver_blks["perceiver_block" + str(n)]["cross_attns"]
latent_transformer = self.perceiver_blks["perceiver_block" + str(n)][
"latent_transformer"
]
for cross_attn in cross_attns:
x = cross_attn(x, x_emb)
x = latent_transformer(x)
# average along the latent index axis
x = x.mean(dim=1)
return self.perceiver_mlp(x)
@property
def attention_weights(self) -> List:
r"""List with the attention weights. If the weights are not shared
between perceiver blocks each element of the list will be a list
itself containing the Cross Attention and Latent Transformer
attention weights respectively
The shape of the attention weights is:
- Cross Attention: :math:`(N, C, L, F)`
- Latent Attention: :math:`(N, T, L, L)`
Where *N* is the batch size, *C* is the number of Cross Attention
heads, *L* is the number of Latents, *F* is the number of
features/columns in the dataset and *T* is the number of Latent
Attention heads
"""
if self.share_weights:
cross_attns = self.perceiver_blks["perceiver_block0"]["cross_attns"]
latent_transformer = self.perceiver_blks["perceiver_block0"][
"latent_transformer"
]
attention_weights = self._extract_attn_weights(
cross_attns, latent_transformer
)
else:
attention_weights = []
for n in range(self.n_perceiver_blocks):
cross_attns = self.perceiver_blks["perceiver_block" + str(n)][
"cross_attns"
]
latent_transformer = self.perceiver_blks["perceiver_block" + str(n)][
"latent_transformer"
]
attention_weights.append(
self._extract_attn_weights(cross_attns, latent_transformer)
)
return attention_weights
def _build_perceiver_block(self) -> nn.ModuleDict:
perceiver_block = nn.ModuleDict()
# Cross Attention
cross_attns = nn.ModuleList()
for _ in range(self.n_cross_attns):
cross_attns.append(
PerceiverEncoder(
self.input_dim,
self.n_cross_attn_heads,
False, # use_bias
self.attn_dropout,
self.ff_dropout,
self.transformer_activation,
self.latent_dim, # q_dim,
),
)
perceiver_block["cross_attns"] = cross_attns
# Latent Transformer
latent_transformer = nn.Sequential()
for i in range(self.n_latent_blocks):
latent_transformer.add_module(
"latent_block" + str(i),
PerceiverEncoder(
self.latent_dim, # input_dim
self.n_latent_heads,
False, # use_bias
self.attn_dropout,
self.ff_dropout,
self.transformer_activation,
),
)
perceiver_block["latent_transformer"] = latent_transformer
return perceiver_block
@staticmethod
def _extract_attn_weights(cross_attns, latent_transformer) -> List:
attention_weights = []
for cross_attn in cross_attns:
attention_weights.append(cross_attn.attn.attn_weights)
for latent_block in latent_transformer:
attention_weights.append(latent_block.attn.attn_weights)
return attention_weights
| en | 0.718035 | # noqa: F403 Defines an adaptation of a `Perceiver model <https://arxiv.org/abs/2103.03206>`_ that can be used as the ``deeptabular`` component of a Wide & Deep model or independently by itself. Parameters ---------- column_idx: Dict Dict containing the index of the columns that will be passed through the model. Required to slice the tensors. e.g. {'education': 0, 'relationship': 1, 'workclass': 2, ...} cat_embed_input: List, Optional, default = None List of Tuples with the column name and number of unique values for each categorical component e.g. [(education, 11), ...] cat_embed_dropout: float, default = 0.1 Categorical embeddings dropout use_cat_bias: bool, default = False, Boolean indicating if bias will be used for the categorical embeddings cat_embed_activation: Optional, str, default = None, Activation function for the categorical embeddings, if any. `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported. full_embed_dropout: bool, default = False Boolean indicating if an entire embedding (i.e. the representation of one column) will be dropped in the batch. See: :obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`. If ``full_embed_dropout = True``, ``cat_embed_dropout`` is ignored. shared_embed: bool, default = False The idea behind ``shared_embed`` is described in the Appendix A in the `TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The goal of having column embedding is to enable the model to distinguish the classes in one column from those in the other columns'`. In other words, the idea is to let the model learn which column is embedded at the time. add_shared_embed: bool, default = False, The two embedding sharing strategies are: 1) add the shared embeddings to the column embeddings or 2) to replace the first ``frac_shared_embed`` with the shared embeddings. See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings` frac_shared_embed: float, default = 0.25 The fraction of embeddings that will be shared (if ``add_shared_embed = False``) by all the different categories for one particular column. continuous_cols: List, Optional, default = None List with the name of the numeric (aka continuous) columns cont_norm_layer: str, default = "batchnorm" Type of normalization layer applied to the continuous features. Options are: 'layernorm', 'batchnorm' or None. cont_embed_dropout: float, default = 0.1, Continuous embeddings dropout use_cont_bias: bool, default = True, Boolean indicating if bias will be used for the continuous embeddings cont_embed_activation: str, default = None Activation function to be applied to the continuous embeddings, if any. `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported. input_dim: int, default = 32 The so-called *dimension of the model*. Is the number of embeddings used to encode the categorical and/or continuous columns. n_cross_attns: int, default = 1 Number of times each perceiver block will cross attend to the input data (i.e. number of cross attention components per perceiver block). This should normally be 1. However, in the paper they describe some architectures (normally computer vision-related problems) where the Perceiver attends multiple times to the input array. Therefore, maybe multiple cross attention to the input array is also useful in some cases for tabular data n_cross_attn_heads: int, default = 4 Number of attention heads for the cross attention component n_latents: int, default = 16 Number of latents. This is the *N* parameter in the paper. 
As indicated in the paper, this number should be significantly lower than *M* (the number of columns in the dataset). Setting *N* closer to *M* defies the main purpose of the Perceiver, which is to overcome the transformer quadratic bottleneck latent_dim: int, default = 128 Latent dimension. n_latent_heads: int, default = 4 Number of attention heads per Latent Transformer n_latent_blocks: int, default = 4 Number of transformer encoder blocks (normalised MHA + normalised FF) per Latent Transformer n_perceiver_blocks: int, default = 4 Number of Perceiver blocks defined as [Cross Attention + Latent Transformer] share_weights: Boolean, default = False Boolean indicating if the weights will be shared between Perceiver blocks attn_dropout: float, default = 0.2 Dropout that will be applied to the Multi-Head Attention layers ff_dropout: float, default = 0.1 Dropout that will be applied to the FeedForward network transformer_activation: str, default = "gelu" Transformer Encoder activation function. `'tanh'`, `'relu'`, `'leaky_relu'`, `'gelu'`, `'geglu'` and `'reglu'` are supported mlp_hidden_dims: List, Optional, default = None MLP hidden dimensions. If not provided it will default to ``[l, 4*l, 2*l]`` where ``l`` is the MLP's input dimension mlp_activation: str, default = "relu" MLP activation function. `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported mlp_dropout: float, default = 0.1 Dropout that will be applied to the final MLP mlp_batchnorm: bool, default = False Boolean indicating whether or not to apply batch normalization to the dense layers mlp_batchnorm_last: bool, default = False Boolean indicating whether or not to apply batch normalization to the last of the dense layers mlp_linear_first: bool, default = False Boolean indicating whether the order of the operations in the dense layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP -> LIN -> ACT]`` Attributes ---------- cat_and_cont_embed: ``nn.Module`` This is the module that processes the categorical and continuous columns perceiver_blks: ``nn.ModuleDict`` ModuleDict with the Perceiver blocks latents: ``nn.Parameter`` Latents that will be used for prediction perceiver_mlp: ``nn.Module`` MLP component in the model output_dim: int The output dimension of the model. This is a required attribute neccesary to build the ``WideDeep`` class Example -------- >>> import torch >>> from pytorch_widedeep.models import TabPerceiver >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1) >>> colnames = ['a', 'b', 'c', 'd', 'e'] >>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)] >>> continuous_cols = ['e'] >>> column_idx = {k:v for v,k in enumerate(colnames)} >>> model = TabPerceiver(column_idx=column_idx, cat_embed_input=cat_embed_input, ... continuous_cols=continuous_cols, n_latents=2, latent_dim=16, ... n_perceiver_blocks=2) >>> out = model(X_tab) # Embeddings are instantiated at the base model # Transformer blocks # Mlp # the output_dim attribute will be used as input_dim when "merging" the models # average along the latent index axis List with the attention weights. 
If the weights are not shared between perceiver blocks each element of the list will be a list itself containing the Cross Attention and Latent Transformer attention weights respectively The shape of the attention weights is: - Cross Attention: :math:`(N, C, L, F)` - Latent Attention: :math:`(N, T, L, L)` WHere *N* is the batch size, *C* is the number of Cross Attention heads, *L* is the number of Latents, *F* is the number of features/columns in the dataset and *T* is the number of Latent Attention heads # Cross Attention # use_bias # q_dim, # Latent Transformer # input_dim # use_bias | 2.150007 | 2 |
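A sketch of the tensor shapes moving through one Perceiver block in the forward pass above (b = batch size, M = number of columns, N = number of latents); kept as comments since it only restates the code.
# x_emb:   (b, M, input_dim)        one embedding per categorical/continuous column
# latents: (N, latent_dim) -> repeated to (b, N, latent_dim)
# cross attention: queries are the latents, keys/values are x_emb -> (b, N, latent_dim)
# latent transformer: self-attention over the N latents only      -> (b, N, latent_dim)
# mean over the latent axis -> (b, latent_dim) -> perceiver_mlp -> (b, output_dim)
# Per-block cost is roughly O(N*M) + O(N^2) instead of O(M^2), which is why N
# should stay well below M.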
mmdet/models/dense_heads/rpn_test_mixin.py | morkovka1337/mmdetection | 58 | 6632966 | # Copyright (C) 2018-2021 OpenMMLab
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import sys
from mmdet.core import merge_aug_proposals
from mmdet.integration.nncf.utils import no_nncf_trace
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import completed
class RPNTestMixin(object):
"""Test methods of RPN."""
if sys.version_info >= (3, 7):
async def async_simple_test_rpn(self, x, img_metas):
sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)
async with completed(
__name__, 'rpn_head_forward',
sleep_interval=sleep_interval):
rpn_outs = self(x)
proposal_list = self.get_bboxes(*rpn_outs, img_metas)
return proposal_list
def simple_test_rpn(self, x, img_metas):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Proposals of each image.
"""
rpn_outs = self(x)
with no_nncf_trace():
proposal_list = self.get_bboxes(*rpn_outs, img_metas)
return proposal_list
def aug_test_rpn(self, feats, img_metas):
samples_per_gpu = len(img_metas[0])
aug_proposals = [[] for _ in range(samples_per_gpu)]
for x, img_meta in zip(feats, img_metas):
proposal_list = self.simple_test_rpn(x, img_meta)
for i, proposals in enumerate(proposal_list):
aug_proposals[i].append(proposals)
# reorganize the order of 'img_metas' to match the dimensions
# of 'aug_proposals'
aug_img_metas = []
for i in range(samples_per_gpu):
aug_img_meta = []
for j in range(len(img_metas)):
aug_img_meta.append(img_metas[j][i])
aug_img_metas.append(aug_img_meta)
# after merging, proposals will be rescaled to the original image size
merged_proposals = [
merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
]
return merged_proposals
| en | 0.577913 | # Copyright (C) 2018-2021 OpenMMLab # SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2020-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # Test methods of RPN. Test without augmentation. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image. # reorganize the order of 'img_metas' to match the dimensions # of 'aug_proposals' # after merging, proposals will be rescaled to the original image size | 1.997244 | 2 |
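The reindexing loop in aug_test_rpn above is effectively a transpose from [augmentation][sample] to [sample][augmentation]; a standalone illustration with dummy metadata.
img_metas = [['s0_aug0', 's1_aug0'], ['s0_aug1', 's1_aug1']]
aug_img_metas = [[img_metas[j][i] for j in range(len(img_metas))]
                 for i in range(len(img_metas[0]))]
print(aug_img_metas)  # [['s0_aug0', 's0_aug1'], ['s1_aug0', 's1_aug1']]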
instagram/migrations/0001_initial.py | Frankline-Kiplangat/instagram-app | 1 | 6632967 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-08-12 23:26
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import pyuploadcare.dj.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField()),
('comment_title', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_created', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('image', pyuploadcare.dj.models.ImageField(blank=True)),
('message', models.CharField(blank=True, max_length=80)),
('name', models.CharField(max_length=80)),
('caption', models.TextField(blank=True)),
('profile', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.Image')),
('likes', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(upload_to='images/')),
('bio', models.CharField(blank=True, max_length=100)),
('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='comment',
name='image',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='instagram.Image'),
),
]
] | en | 0.78524 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2020-08-12 23:26 | 1.785692 | 2 |
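For clarity, the models.py implied by the initial migration above can be sketched as follows. This is the editor's addition, inferred only from the field definitions in the migration; Meta options, __str__ methods, and anything else the real instagram-app repository defines are omitted.
# Editor's reconstruction sketch, not part of the migration file above.
import datetime
from django.conf import settings
from django.db import models
from pyuploadcare.dj.models import ImageField as UploadcareImageField

class Image(models.Model):
    time_created = models.DateTimeField(blank=True, default=datetime.datetime.now)
    image = UploadcareImageField(blank=True)
    message = models.CharField(blank=True, max_length=80)
    name = models.CharField(max_length=80)
    caption = models.TextField(blank=True)
    profile = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, on_delete=models.CASCADE)

class Comment(models.Model):
    comment = models.TextField()
    comment_title = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, on_delete=models.CASCADE)
    image = models.ForeignKey(Image, blank=True, on_delete=models.CASCADE, related_name='comment')

class Likes(models.Model):
    image = models.ForeignKey(Image, on_delete=models.CASCADE)
    likes = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

class Profile(models.Model):
    profile_pic = models.ImageField(upload_to='images/')
    bio = models.CharField(blank=True, max_length=100)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, on_delete=models.CASCADE, related_name='profile')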
flexneuart/featextr_server/base.py | gitter-badger/FlexNeuART | 101 | 6632968 | #
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Thrift files are generated from
# ./src/main/java/edu/cmu/lti/oaqa/flexneuart/letor/external/protocol.thrift
#
from flexneuart.featextr_server.python_generated.protocol.ExternalScorer import Processor
from flexneuart.featextr_server.python_generated.protocol.ttypes import ScoringException
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from threading import Lock
SAMPLE_HOST = '127.0.0.1'
SAMPLE_PORT = 8080
class BaseQueryHandler:
def __init__(self, exclusive=True):
self.lock_ = Lock() if exclusive else None
if self.lock_ is not None:
print('Locking the base server for single-threaded processing')
else:
print('NOT locking the base server for multi-threaded processing')
# This function must remain in Camel-case, b/c it's tied to Java code
def getScoresFromParsed(self, query, docs):
try:
if self.lock_ is not None:
with self.lock_:
return self.compute_scores_from_parsed_override(query, docs)
else:
return self.compute_scores_from_parsed_override(query, docs)
except Exception as e:
raise ScoringException(str(e))
# This function must remain in Camel-case, b/c it's tied to Java code
def getScoresFromRaw(self, query, docs):
try:
if self.lock_ is not None:
with self.lock_:
return self.compute_scores_from_raw_override(query, docs)
else:
return self.compute_scores_from_raw_override(query, docs)
except Exception as e:
raise ScoringException(str(e))
def text_entry_to_str(self, te):
arr = []
for winfo in te.entries:
arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))
return te.id + ' '.join(arr)
def concat_text_entry_words(self, te):
arr = [winfo.word for winfo in te.entries]
return ' '.join(arr)
# One or both functions need to be implemented in a child class
def compute_scores_from_parsed_override(self, query, docs):
raise ScoringException('Parsed fields are not supported by this server!')
def compute_scores_from_raw_override(self, query, docs):
raise ScoringException('Raw-text fields are not supported by this server!')
# This function starts the server and takes over the program control
def start_query_server(host, port, multi_threaded, query_handler):
processor = Processor(query_handler)
transport = TSocket.TServerSocket(host=host, port=port)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
if multi_threaded:
print('Starting a multi-threaded server...')
server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
else:
print('Starting a single-threaded server...')
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
server.serve()
print('done.')
| #
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Thrift files are generated from
# ./src/main/java/edu/cmu/lti/oaqa/flexneuart/letor/external/protocol.thrift
#
from flexneuart.featextr_server.python_generated.protocol.ExternalScorer import Processor
from flexneuart.featextr_server.python_generated.protocol.ttypes import ScoringException
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from threading import Lock
SAMPLE_HOST = '127.0.0.1'
SAMPLE_PORT = 8080
class BaseQueryHandler:
def __init__(self, exclusive=True):
self.lock_ = Lock() if exclusive else None
if self.lock_ is not None:
print('Locking the base server for single-threaded processing')
else:
print('NOT locking the base server for multi-threaded processing')
# This function must remain in Camel-case, b/c it's tied to Java code
def getScoresFromParsed(self, query, docs):
try:
if self.lock_ is not None:
with self.lock_:
return self.compute_scores_from_parsed_override(query, docs)
else:
return self.compute_scores_from_parsed_override(query, docs)
except Exception as e:
raise ScoringException(str(e))
# This function must remain in Camel-case, b/c it's tied to Java code
def getScoresFromRaw(self, query, docs):
try:
if self.lock_ is not None:
with self.lock_:
return self.compute_scores_from_raw_override(query, docs)
else:
return self.compute_scores_from_raw_override(query, docs)
except Exception as e:
raise ScoringException(str(e))
def text_entry_to_str(self, te):
arr = []
for winfo in te.entries:
arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))
return te.id + ' '.join(arr)
def concat_text_entry_words(self, te):
arr = [winfo.word for winfo in te.entries]
return ' '.join(arr)
# One or both functions need to be implemented in a child class
def compute_scores_from_parsed_override(self, query, docs):
raise ScoringException('Parsed fields are not supported by this server!')
def compute_scores_from_raw_override(self, query, docs):
raise ScoringException('Raw-text fields are not supported by this server!')
# This function starts the server and takes over the program control
def start_query_server(host, port, multi_threaded, query_handler):
processor = Processor(query_handler)
transport = TSocket.TServerSocket(host=host, port=port)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
if multi_threaded:
print('Starting a multi-threaded server...')
server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
else:
print('Starting a single-threaded server...')
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
server.serve()
print('done.')
| en | 0.845401 | # # Copyright 2014+ Carnegie Mellon University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Thrift files are generated from # ./src/main/java/edu/cmu/lti/oaqa/flexneuart/letor/external/protocol.thrift # # This function must remain in Camel-case, b/c it's tied to Java code # This function must remain in Camel-case, b/c it's tied to Java code # One or both functions need to be implemented in a child class # This function starts the server and takes over the program control | 1.769249 | 2 |
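To make the child-class contract of BaseQueryHandler concrete, here is a minimal usage sketch; it is the editor's addition, not part of the FlexNeuART sources. It assumes that both the query and each doc of the parsed interface expose .entries (with .word fields) and an .id, and that the Java side expects a dict mapping document id to a list of feature scores.
# Editor's sketch: a toy scorer that counts query-word overlap as a single feature.
class WordOverlapQueryHandler(BaseQueryHandler):
    def __init__(self):
        super().__init__(exclusive=True)  # serialize scoring calls

    def compute_scores_from_parsed_override(self, query, docs):
        query_words = set(winfo.word for winfo in query.entries)
        scores = {}
        for doc in docs:
            overlap = sum(1 for winfo in doc.entries if winfo.word in query_words)
            scores[doc.id] = [float(overlap)]  # one feature per document (assumed return format)
        return scores

if __name__ == '__main__':
    start_query_server(SAMPLE_HOST, SAMPLE_PORT,
                       multi_threaded=False,
                       query_handler=WordOverlapQueryHandler())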
app/app4.py | lalopark/1DS_HW2 | 0 | 6632969 | # app4.py
import numpy as np
import pandas as pd
import altair as alt
import plotly.express as px
import plotly.graph_objs as go
import pickle as pkle
import os.path
import streamlit as st
def app():
st.title('Writeup')
st.write('We created several different visualizations of the data set from both a macro and micro lens by\
              first illustrating the general statistical distributions of the data scientist candidate population in terms\
              of city development index, training hours, experience, latest jobs, and education levels (in Analysis 1 Tab),\
              then drilling down to gender- and education-level-specific breakdowns to see if there are any noticeable employment\
trends among groups of different gender identities and academic achievements. The City Development Indices of the cities\
that the candidates reside in are extremely left-skewed, with the median at 0.86 with an overwhelming majority of residents\
residing in cities with well-established Infrastructure, Waste Management, Health, Education, and City Product, as defined by\
the United Nations. The specific country/regional information wasn’t provided in the dataset, so the developers would refrain\
from making potentially biased assumptions, but it’s interesting to note that there’s a spike at 0.62 with a moderately large\
group of candidates residing in less developed cities. Our box plot distributions of CDI by Education Level show that Masters,\
Graduates, and PhD’s are highly centered around cities with high CDI’s, while high school and primary school grads are scattered\
towards lower CDI’s with slightly more outliers. We hypothesize that candidates in developing cities may have access to online/open-source\
material that can supplement or replace formal training in DS, hence the supply in the job market.')
    st.write('Graduate degree holders dominate the dataset, with 60% having Graduate Degrees, 22.8% with Undergraduate, 2.15% with PhDs\
              and 10.5% with high school degrees. The developers found this distribution quite jarringly different from the job market situation\
              in the U.S., where close to 50-60% of the data scientist job applicants hold Master’s degrees or higher, so we deemed it a factor highly\
              dependent on the region/continent, which is unknown. The years of experience by education level are as expected, with PhD’s and Master’s\
              students having the upper bound of >20 years, followed by Undergraduate degree holders, then High School and Primary Schoolers. Since Data\
              Scientists used to be primarily PhD’s or academic scholars, it’s not surprising that those groups have more experience than others.\
              The experience distribution by major was quite contrary to our hypothesis - that STEM degree holders would have more YoE on average - with \
all disciplines having pretty much equivalent distributions.')
    st.write('We must note that our dataset is intrinsically imbalanced in terms of the candidates’ experience and gender, with nearly 40% of the\
              candidates having 20+ years of work experience as a Data Scientist. Another limitation of the dataset was its ambiguity in certain\
              columns, including training hours, which the developers assumed to be the # of hours formally trained as a data scientist apart from general\
work experience. This information would have been more meaningful if it were the number of hours per week spent on training oneself as a\
better Data Professional, but since it isn’t, the more relevant work experiences as a Data Scientist, the longer the training hours, hence\
the apparent correlation between having relevant work experience and higher training hours.')
st.write('Last New Job distribution was quite interesting, with 60% of the candidates only having worked at their current job for less than a year.\
Given that DS’s are predominantly hired in tech companies or at tech functions, it’s not surprising that frequent job switches are common\
and not necessarily frowned upon, compared to more traditional industries.')
    st.write('We include two gender-related distributions before deep-diving into them in Analysis 2. Because the dataset has 1.5x more male than female candidates,\
              it was hard to recognize the data points that represent female data scientists in Distribution of Data Scientists by Experience and Last New Job:\
              in almost all cases, the number of male data scientists is much higher, so the points representing female data scientists were covered.\
This graph showed that the respondents who have worked for their previous company for 4 years tended to have longer working experience.\
On the other hand, the ones who have shorter working experience have changed their positions or jobs more often.')
st.write('To classify the data scientists by their gender and education level, we added two drop-down menus, so the users can easily select\
the data that matches with a certain condition and only use it to create plots. Using these two options, we created three-bar plots\
which show the distribution of data scientists by their enrolled university types, majors, and company types. The majority of the data\
scientists in the given data set are not currently enrolled in university. However, most Ph.D.-level data scientists answered that they\
are currently enrolled in university. Also, the proportion of data scientists who are currently attending university full-time was much\
higher in the female data scientists group than in the male group.')
st.write('In the major graph, as was expected, the majority of data scientists studied STEM majors regardless of gender,\
and those who did not attend university are classified as ‘other’ in the major graph. The number of data scientists who studied Arts\
during their undergrad was the lowest in this distribution graph.')
st.write('Lastly, to find which type of companies hire data scientists the most, we drew a graph that shows the company type and size\
that the respondents are currently working for. According to their answers, the majority works for small private firms regardless of gender.\
However, when we selected only Ph.D.-level data scientists, the result was different. The proportion of respondents\
who work for the public sector has increased.')
| # app4.py
import numpy as np
import pandas as pd
import altair as alt
import plotly.express as px
import plotly.graph_objs as go
import pickle as pkle
import os.path
import streamlit as st
def app():
st.title('Writeup')
st.write('We created several different visualizations of the data set from both a macro and micro lens by\
              first illustrating the general statistical distributions of the data scientist candidate population in terms\
              of city development index, training hours, experience, latest jobs, and education levels (in Analysis 1 Tab),\
              then drilling down to gender- and education-level-specific breakdowns to see if there are any noticeable employment\
trends among groups of different gender identities and academic achievements. The City Development Indices of the cities\
that the candidates reside in are extremely left-skewed, with the median at 0.86 with an overwhelming majority of residents\
residing in cities with well-established Infrastructure, Waste Management, Health, Education, and City Product, as defined by\
the United Nations. The specific country/regional information wasn’t provided in the dataset, so the developers would refrain\
from making potentially biased assumptions, but it’s interesting to note that there’s a spike at 0.62 with a moderately large\
group of candidates residing in less developed cities. Our box plot distributions of CDI by Education Level show that Masters,\
Graduates, and PhD’s are highly centered around cities with high CDI’s, while high school and primary school grads are scattered\
towards lower CDI’s with slightly more outliers. We hypothesize that candidates in developing cities may have access to online/open-source\
material that can supplement or replace formal training in DS, hence the supply in the job market.')
    st.write('Graduate degree holders dominate the dataset, with 60% having Graduate Degrees, 22.8% with Undergraduate, 2.15% with PhDs\
              and 10.5% with high school degrees. The developers found this distribution quite jarringly different from the job market situation\
              in the U.S., where close to 50-60% of the data scientist job applicants hold Master’s degrees or higher, so we deemed it a factor highly\
              dependent on the region/continent, which is unknown. The years of experience by education level are as expected, with PhD’s and Master’s\
              students having the upper bound of >20 years, followed by Undergraduate degree holders, then High School and Primary Schoolers. Since Data\
              Scientists used to be primarily PhD’s or academic scholars, it’s not surprising that those groups have more experience than others.\
              The experience distribution by major was quite contrary to our hypothesis - that STEM degree holders would have more YoE on average - with \
all disciplines having pretty much equivalent distributions.')
    st.write('We must note that our dataset is intrinsically imbalanced in terms of the candidates’ experience and gender, with nearly 40% of the\
              candidates having 20+ years of work experience as a Data Scientist. Another limitation of the dataset was its ambiguity in certain\
              columns, including training hours, which the developers assumed to be the # of hours formally trained as a data scientist apart from general\
work experience. This information would have been more meaningful if it were the number of hours per week spent on training oneself as a\
better Data Professional, but since it isn’t, the more relevant work experiences as a Data Scientist, the longer the training hours, hence\
the apparent correlation between having relevant work experience and higher training hours.')
st.write('Last New Job distribution was quite interesting, with 60% of the candidates only having worked at their current job for less than a year.\
Given that DS’s are predominantly hired in tech companies or at tech functions, it’s not surprising that frequent job switches are common\
and not necessarily frowned upon, compared to more traditional industries.')
    st.write('We include two gender-related distributions before deep-diving into them in Analysis 2. Because the dataset has 1.5x more male than female candidates,\
              it was hard to recognize the data points that represent female data scientists in Distribution of Data Scientists by Experience and Last New Job:\
              in almost all cases, the number of male data scientists is much higher, so the points representing female data scientists were covered.\
This graph showed that the respondents who have worked for their previous company for 4 years tended to have longer working experience.\
On the other hand, the ones who have shorter working experience have changed their positions or jobs more often.')
st.write('To classify the data scientists by their gender and education level, we added two drop-down menus, so the users can easily select\
the data that matches with a certain condition and only use it to create plots. Using these two options, we created three-bar plots\
which show the distribution of data scientists by their enrolled university types, majors, and company types. The majority of the data\
scientists in the given data set are not currently enrolled in university. However, most Ph.D.-level data scientists answered that they\
are currently enrolled in university. Also, the proportion of data scientists who are currently attending university full-time was much\
higher in the female data scientists group than in the male group.')
st.write('In the major graph, as was expected, the majority of data scientists studied STEM majors regardless of gender,\
and those who did not attend university are classified as ‘other’ in the major graph. The number of data scientists who studied Arts\
during their undergrad was the lowest in this distribution graph.')
st.write('Lastly, to find which type of companies hire data scientists the most, we drew a graph that shows the company type and size\
that the respondents are currently working for. According to their answers, the majority works for small private firms regardless of gender.\
However, when we selected only Ph.D.-level data scientists, the result was different. The proportion of respondents\
who work for the public sector has increased.')
| en | 0.947025 | # app4.py # of hours formally trained as a data scientist apart from general\ | 2.697246 | 3 |
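The two drop-down filters described in the writeup above can be illustrated with a short sketch; this is the editor's addition rather than code from the app, and the CSV path and column names ('gender', 'education_level', 'company_type') are assumptions.
# Editor's sketch of the drop-down filtering pattern described in the writeup.
import pandas as pd
import plotly.express as px
import streamlit as st

df = pd.read_csv('aug_train.csv')  # hypothetical dataset path

gender = st.selectbox('Gender', ['All'] + sorted(df['gender'].dropna().unique()))
education = st.selectbox('Education level', ['All'] + sorted(df['education_level'].dropna().unique()))

filtered = df
if gender != 'All':
    filtered = filtered[filtered['gender'] == gender]
if education != 'All':
    filtered = filtered[filtered['education_level'] == education]

st.plotly_chart(px.histogram(filtered, x='company_type', title='Company type distribution'))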
robosuite/environments/panda.py | sumaaail/vices | 1 | 6632970 | from collections import OrderedDict
import numpy as np
import robosuite.utils.transform_utils as T
from robosuite.environments import MujocoEnv
from robosuite.models.grippers import gripper_factory
from robosuite.models.robots import Panda
from robosuite.controllers.arm_controller import *
from collections import deque
import hjson
class PandaEnv(MujocoEnv):
"""Initializes a Panda robot environment."""
def __init__(
self,
controller_config_file,
controller,
gripper_type=None,
gripper_visualization=False,
use_indicator_object=False,
has_renderer=False,
has_offscreen_renderer=True,
render_collision_mesh=False,
render_visual_mesh=True,
control_freq=10,
horizon=1000,
ignore_done=False,
use_camera_obs=False,
camera_name="frontview",
camera_height=256,
camera_width=256,
camera_depth=False,
impedance_ctrl=True, # TODO
initial_policy=None, # TODO - currently not included in the config file (should be a function)
**kwargs
):
"""
Args:
controller_config_file (str): filepath to the corresponding controller config file that contains the
associated controller parameters
controller (str): Can be 'position', 'position_orientation', 'joint_velocity', 'joint_impedance', or
'joint_torque'. Specifies the type of controller to be used for dynamic trajectories
gripper_type (str): type of gripper, used to instantiate
gripper models from gripper factory.
gripper_visualization (bool): True if using gripper visualization.
Useful for teleoperation.
use_indicator_object (bool): if True, sets up an indicator object that
is useful for debugging.
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering.
render_collision_mesh (bool): True if rendering collision meshes
in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes
in camera. False otherwise.
control_freq (float): how many control signals to receive
in every second. This sets the amount of simulation time
that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
use_camera_obs (bool): if True, every observation includes a
rendered image.
camera_name (str): name of camera to be rendered. Must be
set if @use_camera_obs is True.
camera_height (int): height of camera frame.
camera_width (int): width of camera frame.
camera_depth (bool): True if rendering RGB-D, and RGB otherwise.
impedance_ctrl (bool) : True if we want to control impedance of the end effector
#########
**kwargs includes additional params that may be specified and will override values found in
the controller configuration file if the names match
"""
self.initial_policy = initial_policy
self.impedance_ctrl = impedance_ctrl
if self.impedance_ctrl:
# Load the appropriate controller
self._load_controller(controller, controller_config_file, kwargs)
if 'residual_policy_multiplier' in kwargs:
self.residual_policy_multiplier = kwargs['residual_policy_multiplier']
else:
self.residual_policy_multiplier = None
self.goal = np.zeros(3)
self.goal_orientation = np.zeros(3)
self.desired_force = np.zeros(3)
self.desired_torque = np.zeros(3)
if 'residual_policy_multiplier' in kwargs:
self.residual_policy_multiplier = kwargs['residual_policy_multiplier']
else:
self.residual_policy_multiplier = None
self.initial_policy = initial_policy
self.control_freq = control_freq
self.timestep = 0
# self.position_limits = [[0,0,0],[0,0,0]]
# self.orientation_limits = [[0,0,0],[0,0,0]]
self.ee_force = np.zeros(3)
self.ee_force_bias = np.zeros(3)
self.contact_threshold = 1 # Maximum contact variation allowed without contact [N]
self.ee_torque = np.zeros(3)
self.ee_torque_bias = np.zeros(3)
# self.controller = controller
# TODO - check that these are updated properly
self.total_kp = np.zeros(6)
self.total_damping = np.zeros(6)
self.n_avg_ee_acc = 10
self.has_gripper = gripper_type is not None
self.gripper_type = gripper_type
self.gripper_visualization = gripper_visualization
self.use_indicator_object = use_indicator_object
super().__init__(
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
use_camera_obs=use_camera_obs,
camera_name=camera_name,
camera_height=camera_height,
            camera_width=camera_width,
camera_depth=camera_depth,
)
# Current and previous policy step q values, joint torques, ft ee applied and actions
self.prev_pstep_ft = np.zeros(6)
self.curr_pstep_ft = np.zeros(6)
self.prev_pstep_a = np.zeros(self.dof)
self.curr_pstep_a = np.zeros(self.dof)
self.prev_pstep_q = np.zeros(len(self._ref_joint_vel_indexes))
self.curr_pstep_q = np.zeros(len(self._ref_joint_vel_indexes))
self.prev_pstep_t = np.zeros(len(self._ref_joint_vel_indexes))
self.curr_pstep_t = np.zeros(len(self._ref_joint_vel_indexes))
self.prev_pstep_ee_v = np.zeros(6)
self.curr_pstep_ee_v = np.zeros(6)
self.buffer_pstep_ee_v = deque(np.zeros(6) for _ in range(self.n_avg_ee_acc))
self.ee_acc = np.zeros(6)
self.total_ee_acc = np.zeros(6) # used to compute average
self.total_js_energy = np.zeros(len(self._ref_joint_vel_indexes))
        self.total_joint_torque = 0
self.joint_torques = 0
self.prev_ee_pos = np.zeros(7)
self.ee_pos = np.zeros(7)
## counting joint limits
self.joint_limit_count = 0
def _load_controller(self, controller_type, controller_file, kwargs):
"""
Loads controller to be used for dynamic trajectories
Controller_type is a specified controller, and controller_params is a config file containing the appropriate
parameters for that controller
Kwargs is kwargs passed from init call and represents individual params to override in controller config file
"""
# Load the controller config file
try:
with open(controller_file) as f:
params = hjson.load(f)
        except FileNotFoundError:
            print("Controller config file '{}' not found. Please check filepath and try again.".format(
                controller_file))
            raise
controller_params = params[controller_type]
# Load additional arguments from kwargs and override the prior config-file loaded ones
for key, value in kwargs.items():
if key in controller_params:
controller_params[key] = value
if controller_type == ControllerType.POS:
self.controller = PositionController(**controller_params)
elif controller_type == ControllerType.POS_ORI:
self.controller = PositionOrientationController(**controller_params)
elif controller_type == ControllerType.JOINT_IMP:
self.controller = JointImpedanceController(**controller_params)
elif controller_type == ControllerType.JOINT_TORQUE:
self.controller = JointTorqueController(**controller_params)
else:
self.controller = JointVelocityController(**controller_params)
def _load_model(self):
"""
Loads robot and optionally add grippers.
"""
super()._load_model()
# Use xml that has motor torque actuators enabled
self.mujoco_robot = Panda(xml_path="robots/panda/robot_torque.xml")
if self.has_gripper:
self.gripper = gripper_factory(self.gripper_type)
if not self.gripper_visualization:
self.gripper.hide_visualization()
self.mujoco_robot.add_gripper("right_hand", self.gripper)
def _reset_internal(self):
"""
Sets initial pose of arm and grippers.
"""
super()._reset_internal()
self.sim.data.qpos[self._ref_joint_pos_indexes] = self.mujoco_robot.init_qpos
if self.has_gripper:
self.sim.data.qpos[
self._ref_joint_gripper_actuator_indexes
] = self.gripper.init_qpos
self.controller.reset()
self.goal = np.zeros(3)
self.goal_orientation = np.zeros(3)
self.desired_force = np.zeros(3)
self.desired_torque = np.zeros(3)
self.prev_pstep_q = np.array(self.mujoco_robot.init_qpos)
self.curr_pstep_q = np.array(self.mujoco_robot.init_qpos)
self.prev_pstep_a = np.zeros(self.dof)
self.curr_pstep_a = np.zeros(self.dof)
self.prev_pstep_ee_v = np.zeros(6)
self.curr_pstep_ee_v = np.zeros(6)
self.buffer_pstep_ee_v = deque(np.zeros(6) for _ in range(self.n_avg_ee_acc))
self.ee_acc = np.zeros(6)
self.total_ee_acc = np.zeros(6) # used to compute average
self.total_kp = np.zeros(6)
self.total_damping = np.zeros(6)
self.total_js_energy = np.zeros(len(self._ref_joint_vel_indexes))
self.prev_ee_pos = np.zeros(7)
self.ee_pos = np.zeros(7)
self.total_joint_torque = 0
self.joint_torques = 0
def _get_reference(self):
"""
Sets up necessary reference for robots, grippers, and objects.
"""
super()._get_reference()
# indices for joints in qpos, qvel
self.robot_joints = list(self.mujoco_robot.joints)
self._ref_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints
]
self._ref_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints
]
if self.use_indicator_object:
ind_qpos = self.sim.model.get_joint_qpos_addr("pos_indicator")
self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos
ind_qvel = self.sim.model.get_joint_qvel_addr("pos_indicator")
self._ref_indicator_vel_low, self._ref_indicator_vel_high = ind_qvel
self.indicator_id = self.sim.model.body_name2id("pos_indicator")
# indices for grippers in qpos, qvel
if self.has_gripper:
self.gripper_joints = list(self.gripper.joints)
self._ref_gripper_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints
]
self._ref_gripper_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints
]
# indices for joint pos actuation, joint vel actuation, gripper actuation
self._ref_joint_pos_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("pos")
]
self._ref_joint_vel_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("vel")
]
if self.has_gripper:
self._ref_joint_gripper_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("gripper")
]
# IDs of sites for gripper visualization
self.eef_site_id = self.sim.model.site_name2id("grip_site")
self.eef_cylinder_id = self.sim.model.site_name2id("grip_site_cylinder")
def move_indicator(self, pos):
"""
Sets 3d position of indicator object to @pos.
"""
if self.use_indicator_object:
index = self._ref_indicator_pos_low
self.sim.data.qpos[index : index + 3] = pos
def _pre_action(self, action, policy_step):
"""
Overrides the superclass method to actuate the robot with the
passed joint velocities and gripper control.
Args:
action (numpy array): The control to apply to the robot. The first
@self.mujoco_robot.dof dimensions should be the desired
normalized joint velocities and if the robot has
a gripper, the next @self.gripper.dof dimensions should be
actuation controls for the gripper.
"""
self.policy_step = policy_step
# Make sure action length is correct
assert len(action) == self.dof, "environment got invalid action dimension"
# i.e.: not using new controller
if not self.impedance_ctrl:
# clip actions into valid range
low, high = self.action_spec
action = np.clip(action, low, high)
if self.has_gripper:
arm_action = action[: self.mujoco_robot.dof]
gripper_action_in = action[
self.mujoco_robot.dof: self.mujoco_robot.dof + self.gripper.dof
]
gripper_action_actual = self.gripper.format_action(gripper_action_in)
action = np.concatenate([arm_action, gripper_action_actual])
# rescale normalized action to control ranges
ctrl_range = self.sim.model.actuator_ctrlrange
bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
applied_action = bias + weight * action
self.sim.data.ctrl[self._ref_joint_vel_indexes] = applied_action
# gravity compensation
self.sim.data.qfrc_applied[
self._ref_joint_vel_indexes
] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]
if self.use_indicator_object:
self.sim.data.qfrc_applied[
self._ref_indicator_vel_low: self._ref_indicator_vel_high
] = self.sim.data.qfrc_bias[
self._ref_indicator_vel_low: self._ref_indicator_vel_high
]
# using new controller
else:
# Split action into joint control and peripheral (i.e.: gripper) control (as specified by individual gripper)
gripper_action = []
if self.has_gripper:
gripper_action = action[self.controller.control_dim:] # all indexes past controller dimension indexes
action = action[:self.controller.control_dim]
# TODO
# First, get joint space action
# action = action.copy() # ensure that we don't change the action outside of this scope
self.controller.update_model(self.sim, id_name='right_hand', joint_index=self._ref_joint_pos_indexes)
torques = self.controller.action_to_torques(action,
self.policy_step) # this scales and clips the actions correctly
if self.initial_policy:
initial_policy_torques = self.initial_policy.action_to_torques(self.sim, 'right_hand',
self._ref_joint_pos_indexes,
self.initial_policy_action,
self.policy_step)
self.residual_torques = torques
self.initial_torques = initial_policy_torques
if self.residual_policy_multiplier is not None:
torques = self.residual_policy_multiplier * torques + initial_policy_torques
else:
torques = torques + initial_policy_torques # TODO
self.total_joint_torque += np.sum(abs(torques))
self.joint_torques = torques
# Get gripper action, if applicable
if self.has_gripper:
gripper_action_actual = self.gripper.format_action(gripper_action)
# rescale normalized gripper action to control ranges
ctrl_range = self.sim.model.actuator_ctrlrange[self._ref_gripper_joint_vel_indexes]
bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
applied_gripper_action = bias + weight * gripper_action_actual
self.sim.data.ctrl[self._ref_gripper_joint_vel_indexes] = applied_gripper_action
# Now, control both gripper and joints
self.sim.data.ctrl[self._ref_joint_vel_indexes] = self.sim.data.qfrc_bias[
self._ref_joint_vel_indexes] + torques
if self.policy_step:
self.prev_pstep_q = np.array(self.curr_pstep_q)
self.curr_pstep_q = np.array(self.sim.data.qpos[self._ref_joint_vel_indexes])
self.prev_pstep_a = np.array(self.curr_pstep_a)
self.curr_pstep_a = np.array(action) # .copy()) # TODO
self.prev_pstep_t = np.array(self.curr_pstep_t)
self.curr_pstep_t = np.array(self.sim.data.ctrl[self._ref_joint_vel_indexes])
self.prev_pstep_ft = np.array(self.curr_pstep_ft)
# Assumes a ft sensor on the wrist
force_sensor_id = self.sim.model.sensor_name2id("force_ee")
force_ee = self.sim.data.sensordata[force_sensor_id * 3: force_sensor_id * 3 + 3]
torque_sensor_id = self.sim.model.sensor_name2id("torque_ee")
torque_ee = self.sim.data.sensordata[torque_sensor_id * 3: torque_sensor_id * 3 + 3]
self.curr_pstep_ft = np.concatenate([force_ee, torque_ee])
self.prev_pstep_ee_v = self.curr_pstep_ee_v
self.curr_pstep_ee_v = np.concatenate(
[self.sim.data.body_xvelp[self.sim.model.body_name2id("right_hand")],
self.sim.data.body_xvelr[self.sim.model.body_name2id("right_hand")]])
self.buffer_pstep_ee_v.popleft()
self.buffer_pstep_ee_v.append(self.curr_pstep_ee_v)
# convert to matrix
buffer_mat = []
for v in self.buffer_pstep_ee_v:
buffer_mat += [v]
buffer_mat = np.vstack(buffer_mat)
diffs = np.diff(buffer_mat, axis=0)
diffs *= self.control_freq
diffs = np.vstack([self.ee_acc, diffs])
diffs.reshape((self.n_avg_ee_acc, 6))
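            # Estimate end-effector acceleration as a moving average: finite-difference the
            # buffered policy-step velocities (scaled by control_freq above) and smooth each
            # of the 6 components with a length-n_avg_ee_acc box filter, keeping the first sample.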
self.ee_acc = np.array(
[np.convolve(col, np.ones((self.n_avg_ee_acc,)) / self.n_avg_ee_acc, mode='valid')[0] for col in
diffs.transpose()])
def _post_action(self, action):
"""
(Optional) does gripper visualization after actions.
"""
self.prev_ee_pos = self.ee_pos
self.ee_pos = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id('right_hand')])
force_sensor_id = self.sim.model.sensor_name2id("force_ee")
self.ee_force = np.array(self.sim.data.sensordata[force_sensor_id * 3: force_sensor_id * 3 + 3])
if np.linalg.norm(self.ee_force_bias) == 0:
self.ee_force_bias = self.ee_force
torque_sensor_id = self.sim.model.sensor_name2id("torque_ee")
self.ee_torque = np.array(self.sim.data.sensordata[torque_sensor_id * 3: torque_sensor_id * 3 + 3])
if np.linalg.norm(self.ee_torque_bias) == 0:
self.ee_torque_bias = self.ee_torque
ret = super()._post_action(action)
self._gripper_visualization()
return ret
def _get_observation(self):
"""
Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
robot-state: contains robot-centric information.
"""
di = super()._get_observation()
# camera observations
if self.use_camera_obs:
camera_obs = self.sim.render(camera_name=self.camera_name,
width=self.camera_width,
height=self.camera_height,
depth=self.camera_depth)
if self.camera_depth:
di['image'], di['depth'] = camera_obs
else:
di['image'] = camera_obs
# Skip for now, not worth importing cv2 just for this
# if self.visualize_offscreen and not self.real_robot:
# cv2.imshow('Robot observation', np.flip(camera_obs[..., ::-1], 0))
# cv2.waitKey(10)
# proprioceptive features
di["joint_pos"] = np.array(
[self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]
)
di["joint_vel"] = np.array(
[self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]
)
robot_states = [
np.sin(di["joint_pos"]),
np.cos(di["joint_pos"]),
di["joint_vel"],
]
if self.has_gripper:
di["gripper_qpos"] = np.array(
[self.sim.data.qpos[x] for x in self._ref_gripper_joint_pos_indexes]
)
di["gripper_qvel"] = np.array(
[self.sim.data.qvel[x] for x in self._ref_gripper_joint_vel_indexes]
)
di["eef_pos"] = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id('right_hand')])
di["eef_quat"] = T.convert_quat(
self.sim.data.get_body_xquat("right_hand"), to="xyzw"
)
di["eef_vlin"] = np.array(self.sim.data.get_body_xvelp('right_hand'))
di["eef_vang"] = np.array(self.sim.data.get_body_xvelr('right_hand'))
# add in gripper information
robot_states.extend([di["gripper_qpos"], di["eef_pos"], di["eef_quat"], di["eef_vlin"], di["eef_vang"]])
di["robot-state"] = np.concatenate(robot_states)
di["prev-act"] = self.prev_pstep_a
# Adding binary contact observation
in_contact = np.linalg.norm(self.ee_force - self.ee_force_bias) > self.contact_threshold
di["contact-obs"] = in_contact
return di
@property
def action_spec(self):
"""
Action lower/upper limits per dimension.
"""
low = np.ones(self.dof) * -1.
high = np.ones(self.dof) * 1.
return low, high
@property
def dof(self):
"""
Returns the DoF of the robot (with grippers).
"""
if self.impedance_ctrl:
dof = self.controller.action_dim
else:
dof = self.mujoco_robot.dof
if self.has_gripper:
dof += self.gripper.dof
return dof
def pose_in_base_from_name(self, name):
"""
A helper function that takes in a named data field and returns the pose
of that object in the base frame.
"""
pos_in_world = self.sim.data.get_body_xpos(name)
rot_in_world = self.sim.data.get_body_xmat(name).reshape((3, 3))
pose_in_world = T.make_pose(pos_in_world, rot_in_world)
base_pos_in_world = self.sim.data.get_body_xpos("base")
base_rot_in_world = self.sim.data.get_body_xmat("base").reshape((3, 3))
base_pose_in_world = T.make_pose(base_pos_in_world, base_rot_in_world)
world_pose_in_base = T.pose_inv(base_pose_in_world)
pose_in_base = T.pose_in_A_to_pose_in_B(pose_in_world, world_pose_in_base)
return pose_in_base
def set_robot_joint_positions(self, jpos):
"""
Helper method to force robot joint positions to the passed values.
"""
self.sim.data.qpos[self._ref_joint_pos_indexes] = jpos
self.sim.forward()
@property
def _right_hand_joint_cartesian_pose(self):
"""
Returns the cartesian pose of the last robot joint in base frame of robot.
"""
return self.pose_in_base_from_name("right_l6")
@property
def _right_hand_pose(self):
"""
Returns eef pose in base frame of robot.
"""
return self.pose_in_base_from_name("right_hand")
@property
def _right_hand_quat(self):
"""
Returns eef quaternion in base frame of robot.
"""
return T.mat2quat(self._right_hand_orn)
@property
def _right_hand_total_velocity(self):
"""
Returns the total eef velocity (linear + angular) in the base frame
as a numpy array of shape (6,)
"""
# Use jacobian to translate joint velocities to end effector velocities.
Jp = self.sim.data.get_body_jacp("right_hand").reshape((3, -1))
Jp_joint = Jp[:, self._ref_joint_vel_indexes]
Jr = self.sim.data.get_body_jacr("right_hand").reshape((3, -1))
Jr_joint = Jr[:, self._ref_joint_vel_indexes]
eef_lin_vel = Jp_joint.dot(self._joint_velocities)
eef_rot_vel = Jr_joint.dot(self._joint_velocities)
return np.concatenate([eef_lin_vel, eef_rot_vel])
@property
def _right_hand_pos(self):
"""
Returns position of eef in base frame of robot.
"""
eef_pose_in_base = self._right_hand_pose
return eef_pose_in_base[:3, 3]
@property
def _right_hand_orn(self):
"""
Returns orientation of eef in base frame of robot as a rotation matrix.
"""
eef_pose_in_base = self._right_hand_pose
return eef_pose_in_base[:3, :3]
@property
def _right_hand_vel(self):
"""
Returns velocity of eef in base frame of robot.
"""
return self._right_hand_total_velocity[:3]
@property
def _right_hand_ang_vel(self):
"""
Returns angular velocity of eef in base frame of robot.
"""
return self._right_hand_total_velocity[3:]
@property
def _joint_positions(self):
"""
Returns a numpy array of joint positions.
Panda robots have 7 joints and positions are in rotation angles.
"""
return self.sim.data.qpos[self._ref_joint_pos_indexes]
@property
def _joint_velocities(self):
"""
Returns a numpy array of joint velocities.
Panda robots have 7 joints and velocities are angular velocities.
"""
return self.sim.data.qvel[self._ref_joint_vel_indexes]
def _gripper_visualization(self):
"""
Do any needed visualization here.
"""
# By default, don't do any coloring.
self.sim.model.site_rgba[self.eef_site_id] = [0., 0., 0., 0.]
def _check_contact(self):
"""
Returns True if the gripper is in contact with another object.
"""
return False
def _check_arm_contact(self):
"""
Returns True if the arm is in contact with another object.
"""
collision = False
for contact in self.sim.data.contact[:self.sim.data.ncon]:
if self.sim.model.geom_id2name(contact.geom1) in self.mujoco_robot.contact_geoms or \
self.sim.model.geom_id2name(contact.geom2) in self.mujoco_robot.contact_geoms:
collision = True
break
return collision
def _check_q_limits(self):
"""
Returns True if the arm is in joint limits or very close to.
"""
joint_limits = False
tolerance = 0.1
for (idx, (q, q_limits)) in enumerate(
zip(self.sim.data.qpos[self._ref_joint_pos_indexes], self.sim.model.jnt_range)):
if not (q > q_limits[0] + tolerance and q < q_limits[1] - tolerance):
print("Joint limit reached in joint " + str(idx))
joint_limits = True
self.joint_limit_count += 1
return joint_limits
def _compute_q_delta(self):
"""
Returns the change in joint space configuration between previous and current steps
"""
q_delta = self.prev_pstep_q - self.curr_pstep_q
return q_delta
def _compute_t_delta(self):
"""
Returns the change in joint space configuration between previous and current steps
"""
t_delta = self.prev_pstep_t - self.curr_pstep_t
return t_delta
def _compute_a_delta(self):
"""
Returns the change in policy action between previous and current steps
"""
a_delta = self.prev_pstep_a - self.curr_pstep_a
return a_delta
def _compute_ft_delta(self):
"""
Returns the change in policy action between previous and current steps
"""
ft_delta = self.prev_pstep_ft - self.curr_pstep_ft
return ft_delta
def _compute_js_energy(self):
"""
Returns the energy consumed by each joint between previous and current steps
"""
# Mean torque applied
mean_t = self.prev_pstep_t - self.curr_pstep_t
# We assume in the motors torque is proportional to current (and voltage is constant)
# In that case the amount of power scales proportional to the torque and the energy is the
# time integral of that
js_energy = np.abs((1.0 / self.control_freq) * mean_t)
return js_energy
def _compute_ee_ft_integral(self):
"""
Returns the integral over time of the applied ee force-torque
"""
mean_ft = self.prev_pstep_ft - self.curr_pstep_ft
integral_ft = np.abs((1.0 / self.control_freq) * mean_ft)
return integral_ft
def render_additional_image(self, camera_name, camera_width, camera_height, camera_depth):
img = self.sim.render(camera_name=camera_name,
width=camera_width,
height=camera_height,
depth=camera_depth)
return img
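# --- Editor's illustrative sketch; not part of the original panda.py ---
# Demonstrates the affine rescaling used in _pre_action, which maps a normalized
# action in [-1, 1] to an actuator control range via its midpoint (bias) and
# half-width (weight). The ctrl_range values below are made up for illustration;
# the real ones come from the MJCF model's actuator_ctrlrange.
if __name__ == "__main__":
    example_ctrl_range = np.array([[-80.0, 80.0],
                                   [0.0, 0.04]])
    example_action = np.array([0.5, -1.0])             # normalized action in [-1, 1]
    bias = 0.5 * (example_ctrl_range[:, 1] + example_ctrl_range[:, 0])
    weight = 0.5 * (example_ctrl_range[:, 1] - example_ctrl_range[:, 0])
    print(bias + weight * example_action)               # -> [40.  0.]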
| from collections import OrderedDict
import numpy as np
import robosuite.utils.transform_utils as T
from robosuite.environments import MujocoEnv
from robosuite.models.grippers import gripper_factory
from robosuite.models.robots import Panda
from robosuite.controllers.arm_controller import *
from collections import deque
import hjson
class PandaEnv(MujocoEnv):
"""Initializes a Panda robot environment."""
def __init__(
self,
controller_config_file,
controller,
gripper_type=None,
gripper_visualization=False,
use_indicator_object=False,
has_renderer=False,
has_offscreen_renderer=True,
render_collision_mesh=False,
render_visual_mesh=True,
control_freq=10,
horizon=1000,
ignore_done=False,
use_camera_obs=False,
camera_name="frontview",
camera_height=256,
camera_width=256,
camera_depth=False,
impedance_ctrl=True, # TODO
initial_policy=None, # TODO - currently not included in the config file (should be a function)
**kwargs
):
"""
Args:
controller_config_file (str): filepath to the corresponding controller config file that contains the
associated controller parameters
controller (str): Can be 'position', 'position_orientation', 'joint_velocity', 'joint_impedance', or
'joint_torque'. Specifies the type of controller to be used for dynamic trajectories
gripper_type (str): type of gripper, used to instantiate
gripper models from gripper factory.
gripper_visualization (bool): True if using gripper visualization.
Useful for teleoperation.
use_indicator_object (bool): if True, sets up an indicator object that
is useful for debugging.
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering.
render_collision_mesh (bool): True if rendering collision meshes
in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes
in camera. False otherwise.
control_freq (float): how many control signals to receive
in every second. This sets the amount of simulation time
that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
use_camera_obs (bool): if True, every observation includes a
rendered image.
camera_name (str): name of camera to be rendered. Must be
set if @use_camera_obs is True.
camera_height (int): height of camera frame.
camera_width (int): width of camera frame.
camera_depth (bool): True if rendering RGB-D, and RGB otherwise.
impedance_ctrl (bool) : True if we want to control impedance of the end effector
#########
**kwargs includes additional params that may be specified and will override values found in
the controller configuration file if the names match
"""
self.initial_policy = initial_policy
self.impedance_ctrl = impedance_ctrl
if self.impedance_ctrl:
# Load the appropriate controller
self._load_controller(controller, controller_config_file, kwargs)
if 'residual_policy_multiplier' in kwargs:
self.residual_policy_multiplier = kwargs['residual_policy_multiplier']
else:
self.residual_policy_multiplier = None
self.goal = np.zeros(3)
self.goal_orientation = np.zeros(3)
self.desired_force = np.zeros(3)
self.desired_torque = np.zeros(3)
if 'residual_policy_multiplier' in kwargs:
self.residual_policy_multiplier = kwargs['residual_policy_multiplier']
else:
self.residual_policy_multiplier = None
self.initial_policy = initial_policy
self.control_freq = control_freq
self.timestep = 0
# self.position_limits = [[0,0,0],[0,0,0]]
# self.orientation_limits = [[0,0,0],[0,0,0]]
self.ee_force = np.zeros(3)
self.ee_force_bias = np.zeros(3)
self.contact_threshold = 1 # Maximum contact variation allowed without contact [N]
self.ee_torque = np.zeros(3)
self.ee_torque_bias = np.zeros(3)
# self.controller = controller
# TODO - check that these are updated properly
self.total_kp = np.zeros(6)
self.total_damping = np.zeros(6)
self.n_avg_ee_acc = 10
self.has_gripper = gripper_type is not None
self.gripper_type = gripper_type
self.gripper_visualization = gripper_visualization
self.use_indicator_object = use_indicator_object
super().__init__(
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
use_camera_obs=use_camera_obs,
camera_name=camera_name,
camera_height=camera_height,
            camera_width=camera_width,
camera_depth=camera_depth,
)
# Current and previous policy step q values, joint torques, ft ee applied and actions
self.prev_pstep_ft = np.zeros(6)
self.curr_pstep_ft = np.zeros(6)
self.prev_pstep_a = np.zeros(self.dof)
self.curr_pstep_a = np.zeros(self.dof)
self.prev_pstep_q = np.zeros(len(self._ref_joint_vel_indexes))
self.curr_pstep_q = np.zeros(len(self._ref_joint_vel_indexes))
self.prev_pstep_t = np.zeros(len(self._ref_joint_vel_indexes))
self.curr_pstep_t = np.zeros(len(self._ref_joint_vel_indexes))
self.prev_pstep_ee_v = np.zeros(6)
self.curr_pstep_ee_v = np.zeros(6)
self.buffer_pstep_ee_v = deque(np.zeros(6) for _ in range(self.n_avg_ee_acc))
self.ee_acc = np.zeros(6)
self.total_ee_acc = np.zeros(6) # used to compute average
self.total_js_energy = np.zeros(len(self._ref_joint_vel_indexes))
        self.total_joint_torque = 0
self.joint_torques = 0
self.prev_ee_pos = np.zeros(7)
self.ee_pos = np.zeros(7)
## counting joint limits
self.joint_limit_count = 0
def _load_controller(self, controller_type, controller_file, kwargs):
"""
Loads controller to be used for dynamic trajectories
Controller_type is a specified controller, and controller_params is a config file containing the appropriate
parameters for that controller
Kwargs is kwargs passed from init call and represents individual params to override in controller config file
"""
# Load the controller config file
try:
with open(controller_file) as f:
params = hjson.load(f)
        except FileNotFoundError:
            print("Controller config file '{}' not found. Please check filepath and try again.".format(
                controller_file))
            raise
controller_params = params[controller_type]
# Load additional arguments from kwargs and override the prior config-file loaded ones
for key, value in kwargs.items():
if key in controller_params:
controller_params[key] = value
if controller_type == ControllerType.POS:
self.controller = PositionController(**controller_params)
elif controller_type == ControllerType.POS_ORI:
self.controller = PositionOrientationController(**controller_params)
elif controller_type == ControllerType.JOINT_IMP:
self.controller = JointImpedanceController(**controller_params)
elif controller_type == ControllerType.JOINT_TORQUE:
self.controller = JointTorqueController(**controller_params)
else:
self.controller = JointVelocityController(**controller_params)
def _load_model(self):
"""
Loads robot and optionally add grippers.
"""
super()._load_model()
# Use xml that has motor torque actuators enabled
self.mujoco_robot = Panda(xml_path="robots/panda/robot_torque.xml")
if self.has_gripper:
self.gripper = gripper_factory(self.gripper_type)
if not self.gripper_visualization:
self.gripper.hide_visualization()
self.mujoco_robot.add_gripper("right_hand", self.gripper)
def _reset_internal(self):
"""
Sets initial pose of arm and grippers.
"""
super()._reset_internal()
self.sim.data.qpos[self._ref_joint_pos_indexes] = self.mujoco_robot.init_qpos
if self.has_gripper:
self.sim.data.qpos[
self._ref_joint_gripper_actuator_indexes
] = self.gripper.init_qpos
self.controller.reset()
self.goal = np.zeros(3)
self.goal_orientation = np.zeros(3)
self.desired_force = np.zeros(3)
self.desired_torque = np.zeros(3)
self.prev_pstep_q = np.array(self.mujoco_robot.init_qpos)
self.curr_pstep_q = np.array(self.mujoco_robot.init_qpos)
self.prev_pstep_a = np.zeros(self.dof)
self.curr_pstep_a = np.zeros(self.dof)
self.prev_pstep_ee_v = np.zeros(6)
self.curr_pstep_ee_v = np.zeros(6)
self.buffer_pstep_ee_v = deque(np.zeros(6) for _ in range(self.n_avg_ee_acc))
self.ee_acc = np.zeros(6)
self.total_ee_acc = np.zeros(6) # used to compute average
self.total_kp = np.zeros(6)
self.total_damping = np.zeros(6)
self.total_js_energy = np.zeros(len(self._ref_joint_vel_indexes))
self.prev_ee_pos = np.zeros(7)
self.ee_pos = np.zeros(7)
self.total_joint_torque = 0
self.joint_torques = 0
def _get_reference(self):
"""
Sets up necessary reference for robots, grippers, and objects.
"""
super()._get_reference()
# indices for joints in qpos, qvel
self.robot_joints = list(self.mujoco_robot.joints)
self._ref_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints
]
self._ref_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints
]
if self.use_indicator_object:
ind_qpos = self.sim.model.get_joint_qpos_addr("pos_indicator")
self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos
ind_qvel = self.sim.model.get_joint_qvel_addr("pos_indicator")
self._ref_indicator_vel_low, self._ref_indicator_vel_high = ind_qvel
self.indicator_id = self.sim.model.body_name2id("pos_indicator")
# indices for grippers in qpos, qvel
if self.has_gripper:
self.gripper_joints = list(self.gripper.joints)
self._ref_gripper_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints
]
self._ref_gripper_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints
]
# indices for joint pos actuation, joint vel actuation, gripper actuation
self._ref_joint_pos_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("pos")
]
self._ref_joint_vel_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("vel")
]
if self.has_gripper:
self._ref_joint_gripper_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("gripper")
]
# IDs of sites for gripper visualization
self.eef_site_id = self.sim.model.site_name2id("grip_site")
self.eef_cylinder_id = self.sim.model.site_name2id("grip_site_cylinder")
def move_indicator(self, pos):
"""
Sets 3d position of indicator object to @pos.
"""
if self.use_indicator_object:
index = self._ref_indicator_pos_low
self.sim.data.qpos[index : index + 3] = pos
def _pre_action(self, action, policy_step):
"""
Overrides the superclass method to actuate the robot with the
passed joint velocities and gripper control.
Args:
action (numpy array): The control to apply to the robot. The first
@self.mujoco_robot.dof dimensions should be the desired
normalized joint velocities and if the robot has
a gripper, the next @self.gripper.dof dimensions should be
actuation controls for the gripper.
"""
self.policy_step = policy_step
# Make sure action length is correct
assert len(action) == self.dof, "environment got invalid action dimension"
# i.e.: not using new controller
if not self.impedance_ctrl:
# clip actions into valid range
low, high = self.action_spec
action = np.clip(action, low, high)
if self.has_gripper:
arm_action = action[: self.mujoco_robot.dof]
gripper_action_in = action[
self.mujoco_robot.dof: self.mujoco_robot.dof + self.gripper.dof
]
gripper_action_actual = self.gripper.format_action(gripper_action_in)
action = np.concatenate([arm_action, gripper_action_actual])
# rescale normalized action to control ranges
ctrl_range = self.sim.model.actuator_ctrlrange
bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
applied_action = bias + weight * action
self.sim.data.ctrl[self._ref_joint_vel_indexes] = applied_action
# gravity compensation
self.sim.data.qfrc_applied[
self._ref_joint_vel_indexes
] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]
if self.use_indicator_object:
self.sim.data.qfrc_applied[
self._ref_indicator_vel_low: self._ref_indicator_vel_high
] = self.sim.data.qfrc_bias[
self._ref_indicator_vel_low: self._ref_indicator_vel_high
]
# using new controller
else:
# Split action into joint control and peripheral (i.e.: gripper) control (as specified by individual gripper)
gripper_action = []
if self.has_gripper:
gripper_action = action[self.controller.control_dim:] # all indexes past controller dimension indexes
action = action[:self.controller.control_dim]
# TODO
# First, get joint space action
# action = action.copy() # ensure that we don't change the action outside of this scope
self.controller.update_model(self.sim, id_name='right_hand', joint_index=self._ref_joint_pos_indexes)
torques = self.controller.action_to_torques(action,
self.policy_step) # this scales and clips the actions correctly
if self.initial_policy:
initial_policy_torques = self.initial_policy.action_to_torques(self.sim, 'right_hand',
self._ref_joint_pos_indexes,
self.initial_policy_action,
self.policy_step)
self.residual_torques = torques
self.initial_torques = initial_policy_torques
if self.residual_policy_multiplier is not None:
torques = self.residual_policy_multiplier * torques + initial_policy_torques
else:
torques = torques + initial_policy_torques # TODO
self.total_joint_torque += np.sum(abs(torques))
self.joint_torques = torques
# Get gripper action, if applicable
if self.has_gripper:
gripper_action_actual = self.gripper.format_action(gripper_action)
# rescale normalized gripper action to control ranges
ctrl_range = self.sim.model.actuator_ctrlrange[self._ref_gripper_joint_vel_indexes]
bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
applied_gripper_action = bias + weight * gripper_action_actual
self.sim.data.ctrl[self._ref_gripper_joint_vel_indexes] = applied_gripper_action
# Now, control both gripper and joints
self.sim.data.ctrl[self._ref_joint_vel_indexes] = self.sim.data.qfrc_bias[
self._ref_joint_vel_indexes] + torques
if self.policy_step:
self.prev_pstep_q = np.array(self.curr_pstep_q)
self.curr_pstep_q = np.array(self.sim.data.qpos[self._ref_joint_vel_indexes])
self.prev_pstep_a = np.array(self.curr_pstep_a)
self.curr_pstep_a = np.array(action) # .copy()) # TODO
self.prev_pstep_t = np.array(self.curr_pstep_t)
self.curr_pstep_t = np.array(self.sim.data.ctrl[self._ref_joint_vel_indexes])
self.prev_pstep_ft = np.array(self.curr_pstep_ft)
# Assumes a ft sensor on the wrist
force_sensor_id = self.sim.model.sensor_name2id("force_ee")
force_ee = self.sim.data.sensordata[force_sensor_id * 3: force_sensor_id * 3 + 3]
torque_sensor_id = self.sim.model.sensor_name2id("torque_ee")
torque_ee = self.sim.data.sensordata[torque_sensor_id * 3: torque_sensor_id * 3 + 3]
self.curr_pstep_ft = np.concatenate([force_ee, torque_ee])
self.prev_pstep_ee_v = self.curr_pstep_ee_v
self.curr_pstep_ee_v = np.concatenate(
[self.sim.data.body_xvelp[self.sim.model.body_name2id("right_hand")],
self.sim.data.body_xvelr[self.sim.model.body_name2id("right_hand")]])
self.buffer_pstep_ee_v.popleft()
self.buffer_pstep_ee_v.append(self.curr_pstep_ee_v)
# convert to matrix
buffer_mat = []
for v in self.buffer_pstep_ee_v:
buffer_mat += [v]
buffer_mat = np.vstack(buffer_mat)
diffs = np.diff(buffer_mat, axis=0)
diffs *= self.control_freq
diffs = np.vstack([self.ee_acc, diffs])
diffs.reshape((self.n_avg_ee_acc, 6))
self.ee_acc = np.array(
[np.convolve(col, np.ones((self.n_avg_ee_acc,)) / self.n_avg_ee_acc, mode='valid')[0] for col in
diffs.transpose()])
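# --- Editor's sketch (illustrative, not part of the original environment) ---
# The non-impedance branch of _pre_action above rescales a normalized action in
# [-1, 1] onto the actuator control range via a midpoint (bias) and half-width
# (weight). This self-contained check uses a made-up two-actuator control range.
def _example_rescale_normalized_action():
    import numpy as np
    ctrl_range = np.array([[-80.0, 80.0], [-40.0, 40.0]])  # hypothetical limits
    action = np.array([0.5, -1.0])                          # normalized command
    bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])      # midpoints -> [0, 0]
    weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])    # half-widths -> [80, 40]
    applied = bias + weight * action
    assert np.allclose(applied, [40.0, -40.0])
    return applied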
def _post_action(self, action):
"""
(Optional) does gripper visualization after actions.
"""
self.prev_ee_pos = self.ee_pos
self.ee_pos = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id('right_hand')])
force_sensor_id = self.sim.model.sensor_name2id("force_ee")
self.ee_force = np.array(self.sim.data.sensordata[force_sensor_id * 3: force_sensor_id * 3 + 3])
if np.linalg.norm(self.ee_force_bias) == 0:
self.ee_force_bias = self.ee_force
torque_sensor_id = self.sim.model.sensor_name2id("torque_ee")
self.ee_torque = np.array(self.sim.data.sensordata[torque_sensor_id * 3: torque_sensor_id * 3 + 3])
if np.linalg.norm(self.ee_torque_bias) == 0:
self.ee_torque_bias = self.ee_torque
ret = super()._post_action(action)
self._gripper_visualization()
return ret
def _get_observation(self):
"""
Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
robot-state: contains robot-centric information.
"""
di = super()._get_observation()
# camera observations
if self.use_camera_obs:
camera_obs = self.sim.render(camera_name=self.camera_name,
width=self.camera_width,
height=self.camera_height,
depth=self.camera_depth)
if self.camera_depth:
di['image'], di['depth'] = camera_obs
else:
di['image'] = camera_obs
# Skip for now, not worth importing cv2 just for this
# if self.visualize_offscreen and not self.real_robot:
# cv2.imshow('Robot observation', np.flip(camera_obs[..., ::-1], 0))
# cv2.waitKey(10)
# proprioceptive features
di["joint_pos"] = np.array(
[self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]
)
di["joint_vel"] = np.array(
[self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]
)
robot_states = [
np.sin(di["joint_pos"]),
np.cos(di["joint_pos"]),
di["joint_vel"],
]
if self.has_gripper:
di["gripper_qpos"] = np.array(
[self.sim.data.qpos[x] for x in self._ref_gripper_joint_pos_indexes]
)
di["gripper_qvel"] = np.array(
[self.sim.data.qvel[x] for x in self._ref_gripper_joint_vel_indexes]
)
di["eef_pos"] = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id('right_hand')])
di["eef_quat"] = T.convert_quat(
self.sim.data.get_body_xquat("right_hand"), to="xyzw"
)
di["eef_vlin"] = np.array(self.sim.data.get_body_xvelp('right_hand'))
di["eef_vang"] = np.array(self.sim.data.get_body_xvelr('right_hand'))
# add in gripper information
robot_states.extend([di["gripper_qpos"], di["eef_pos"], di["eef_quat"], di["eef_vlin"], di["eef_vang"]])
di["robot-state"] = np.concatenate(robot_states)
di["prev-act"] = self.prev_pstep_a
# Adding binary contact observation
in_contact = np.linalg.norm(self.ee_force - self.ee_force_bias) > self.contact_threshold
di["contact-obs"] = in_contact
return di
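# --- Editor's sketch (illustrative, not part of the original environment) ---
# A common motivation for the (sin, cos) joint-angle encoding used in the
# robot-state observation above is that angles just across the +/-pi wrap stay
# close in feature space even though they are far apart numerically. The
# numbers below are made up purely to illustrate that.
def _example_angle_encoding():
    import numpy as np
    q = np.array([np.pi - 0.05, -np.pi + 0.05])  # nearly the same physical angle
    raw_gap = abs(q[0] - q[1])                   # ~6.18 in raw angle space
    feat_gap = np.hypot(np.sin(q[0]) - np.sin(q[1]), np.cos(q[0]) - np.cos(q[1]))
    assert feat_gap < 0.2 < raw_gap
    return np.concatenate([np.sin(q), np.cos(q)])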
@property
def action_spec(self):
"""
Action lower/upper limits per dimension.
"""
low = np.ones(self.dof) * -1.
high = np.ones(self.dof) * 1.
return low, high
@property
def dof(self):
"""
Returns the DoF of the robot (with grippers).
"""
if self.impedance_ctrl:
dof = self.controller.action_dim
else:
dof = self.mujoco_robot.dof
if self.has_gripper:
dof += self.gripper.dof
return dof
def pose_in_base_from_name(self, name):
"""
A helper function that takes in a named data field and returns the pose
of that object in the base frame.
"""
pos_in_world = self.sim.data.get_body_xpos(name)
rot_in_world = self.sim.data.get_body_xmat(name).reshape((3, 3))
pose_in_world = T.make_pose(pos_in_world, rot_in_world)
base_pos_in_world = self.sim.data.get_body_xpos("base")
base_rot_in_world = self.sim.data.get_body_xmat("base").reshape((3, 3))
base_pose_in_world = T.make_pose(base_pos_in_world, base_rot_in_world)
world_pose_in_base = T.pose_inv(base_pose_in_world)
pose_in_base = T.pose_in_A_to_pose_in_B(pose_in_world, world_pose_in_base)
return pose_in_base
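# --- Editor's sketch (illustrative, not part of the original environment) ---
# pose_in_base_from_name above composes homogeneous transforms:
# T_base_body = inv(T_world_base) @ T_world_body. A minimal numpy version with
# made-up poses and identity rotations:
def _example_pose_in_base():
    import numpy as np
    def make_pose(pos):
        T = np.eye(4)
        T[:3, 3] = pos
        return T
    body_in_world = make_pose([1.0, 0.5, 0.2])  # hypothetical body pose
    base_in_world = make_pose([1.0, 0.0, 0.0])  # hypothetical base pose
    body_in_base = np.linalg.inv(base_in_world) @ body_in_world
    assert np.allclose(body_in_base[:3, 3], [0.0, 0.5, 0.2])
    return body_in_base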
def set_robot_joint_positions(self, jpos):
"""
Helper method to force robot joint positions to the passed values.
"""
self.sim.data.qpos[self._ref_joint_pos_indexes] = jpos
self.sim.forward()
@property
def _right_hand_joint_cartesian_pose(self):
"""
Returns the cartesian pose of the last robot joint in base frame of robot.
"""
return self.pose_in_base_from_name("right_l6")
@property
def _right_hand_pose(self):
"""
Returns eef pose in base frame of robot.
"""
return self.pose_in_base_from_name("right_hand")
@property
def _right_hand_quat(self):
"""
Returns eef quaternion in base frame of robot.
"""
return T.mat2quat(self._right_hand_orn)
@property
def _right_hand_total_velocity(self):
"""
Returns the total eef velocity (linear + angular) in the base frame
as a numpy array of shape (6,)
"""
# Use jacobian to translate joint velocities to end effector velocities.
Jp = self.sim.data.get_body_jacp("right_hand").reshape((3, -1))
Jp_joint = Jp[:, self._ref_joint_vel_indexes]
Jr = self.sim.data.get_body_jacr("right_hand").reshape((3, -1))
Jr_joint = Jr[:, self._ref_joint_vel_indexes]
eef_lin_vel = Jp_joint.dot(self._joint_velocities)
eef_rot_vel = Jr_joint.dot(self._joint_velocities)
return np.concatenate([eef_lin_vel, eef_rot_vel])
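# --- Editor's sketch (illustrative, not part of the original environment) ---
# The property above pushes joint velocities through the positional and
# rotational Jacobians to get a 6-D end-effector twist: v = Jp @ qdot and
# w = Jr @ qdot. Tiny numpy illustration with made-up 3x2 Jacobians:
def _example_jacobian_twist():
    import numpy as np
    Jp = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])  # hypothetical values
    Jr = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0]])  # hypothetical values
    qdot = np.array([0.2, -0.1])
    twist = np.concatenate([Jp.dot(qdot), Jr.dot(qdot)])
    assert twist.shape == (6,)
    return twist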
@property
def _right_hand_pos(self):
"""
Returns position of eef in base frame of robot.
"""
eef_pose_in_base = self._right_hand_pose
return eef_pose_in_base[:3, 3]
@property
def _right_hand_orn(self):
"""
Returns orientation of eef in base frame of robot as a rotation matrix.
"""
eef_pose_in_base = self._right_hand_pose
return eef_pose_in_base[:3, :3]
@property
def _right_hand_vel(self):
"""
Returns velocity of eef in base frame of robot.
"""
return self._right_hand_total_velocity[:3]
@property
def _right_hand_ang_vel(self):
"""
Returns angular velocity of eef in base frame of robot.
"""
return self._right_hand_total_velocity[3:]
@property
def _joint_positions(self):
"""
Returns a numpy array of joint positions.
Panda robots have 7 joints and positions are in rotation angles.
"""
return self.sim.data.qpos[self._ref_joint_pos_indexes]
@property
def _joint_velocities(self):
"""
Returns a numpy array of joint velocities.
Panda robots have 7 joints and velocities are angular velocities.
"""
return self.sim.data.qvel[self._ref_joint_vel_indexes]
def _gripper_visualization(self):
"""
Do any needed visualization here.
"""
# By default, don't do any coloring.
self.sim.model.site_rgba[self.eef_site_id] = [0., 0., 0., 0.]
def _check_contact(self):
"""
Returns True if the gripper is in contact with another object.
"""
return False
def _check_arm_contact(self):
"""
Returns True if the arm is in contact with another object.
"""
collision = False
for contact in self.sim.data.contact[:self.sim.data.ncon]:
if self.sim.model.geom_id2name(contact.geom1) in self.mujoco_robot.contact_geoms or \
self.sim.model.geom_id2name(contact.geom2) in self.mujoco_robot.contact_geoms:
collision = True
break
return collision
def _check_q_limits(self):
"""
Returns True if any arm joint is at, or very close to, its joint limits.
"""
joint_limits = False
tolerance = 0.1
for (idx, (q, q_limits)) in enumerate(
zip(self.sim.data.qpos[self._ref_joint_pos_indexes], self.sim.model.jnt_range)):
if not (q > q_limits[0] + tolerance and q < q_limits[1] - tolerance):
print("Joint limit reached in joint " + str(idx))
joint_limits = True
self.joint_limit_count += 1
return joint_limits
def _compute_q_delta(self):
"""
Returns the change in joint space configuration between previous and current steps
"""
q_delta = self.prev_pstep_q - self.curr_pstep_q
return q_delta
def _compute_t_delta(self):
"""
Returns the change in applied joint torques (controls) between previous and current steps
"""
t_delta = self.prev_pstep_t - self.curr_pstep_t
return t_delta
def _compute_a_delta(self):
"""
Returns the change in policy action between previous and current steps
"""
a_delta = self.prev_pstep_a - self.curr_pstep_a
return a_delta
def _compute_ft_delta(self):
"""
Returns the change in end-effector force-torque readings between previous and current steps
"""
ft_delta = self.prev_pstep_ft - self.curr_pstep_ft
return ft_delta
def _compute_js_energy(self):
"""
Returns the energy consumed by each joint between previous and current steps
"""
# Mean torque applied
mean_t = self.prev_pstep_t - self.curr_pstep_t
# We assume in the motors torque is proportional to current (and voltage is constant)
# In that case the amount of power scales proportional to the torque and the energy is the
# time integral of that
js_energy = np.abs((1.0 / self.control_freq) * mean_t)
return js_energy
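# --- Editor's sketch (illustrative, not part of the original environment) ---
# _compute_js_energy above approximates per-joint energy for one policy step
# as |torque delta| * dt with dt = 1 / control_freq. Standalone check with
# made-up numbers:
def _example_joint_energy():
    import numpy as np
    control_freq = 20.0                        # hypothetical policy rate [Hz]
    prev_t = np.array([1.0, -2.0, 0.5])
    curr_t = np.array([0.5, -1.0, 0.5])
    js_energy = np.abs((1.0 / control_freq) * (prev_t - curr_t))
    assert np.allclose(js_energy, [0.025, 0.05, 0.0])
    return js_energy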
def _compute_ee_ft_integral(self):
"""
Returns the integral over time of the applied ee force-torque
"""
mean_ft = self.prev_pstep_ft - self.curr_pstep_ft
integral_ft = np.abs((1.0 / self.control_freq) * mean_ft)
return integral_ft
def render_additional_image(self, camera_name, camera_width, camera_height, camera_depth):
img = self.sim.render(camera_name=camera_name,
width=camera_width,
height=camera_height,
depth=camera_depth)
return img
| en | 0.753621 | Initializes a Panda robot environment. # TODO # TODO - currently not included in the config file (should be a function) Args: controller_config_file (str): filepath to the corresponding controller config file that contains the associated controller parameters controller (str): Can be 'position', 'position_orientation', 'joint_velocity', 'joint_impedance', or 'joint_torque'. Specifies the type of controller to be used for dynamic trajectories gripper_type (str): type of gripper, used to instantiate gripper models from gripper factory. gripper_visualization (bool): True if using gripper visualization. Useful for teleoperation. use_indicator_object (bool): if True, sets up an indicator object that is useful for debugging. has_renderer (bool): If true, render the simulation state in a viewer instead of headless mode. has_offscreen_renderer (bool): True if using off-screen rendering. render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise. render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise. control_freq (float): how many control signals to receive in every second. This sets the amount of simulation time that passes between every action input. horizon (int): Every episode lasts for exactly @horizon timesteps. ignore_done (bool): True if never terminating the environment (ignore @horizon). use_camera_obs (bool): if True, every observation includes a rendered image. camera_name (str): name of camera to be rendered. Must be set if @use_camera_obs is True. camera_height (int): height of camera frame. camera_width (int): width of camera frame. camera_depth (bool): True if rendering RGB-D, and RGB otherwise. impedance_ctrl (bool) : True if we want to control impedance of the end effector ######### **kwargs includes additional params that may be specified and will override values found in the controller configuration file if the names match # Load the appropriate controller # self.position_limits = [[0,0,0],[0,0,0]] # self.orientation_limits = [[0,0,0],[0,0,0]] # Maximum contact variation allowed without contact [N] # self.controller = controller # TODO - check that these are updated properly # Current and previous policy step q values, joint torques, ft ee applied and actions # used to compute average ## counting joint limits Loads controller to be used for dynamic trajectories Controller_type is a specified controller, and controller_params is a config file containing the appropriate parameters for that controller Kwargs is kwargs passed from init call and represents individual params to override in controller config file # Load the controller config file # Load additional arguments from kwargs and override the prior config-file loaded ones Loads robot and optionally add grippers. # Use xml that has motor torque actuators enabled Sets initial pose of arm and grippers. # used to compute average Sets up necessary reference for robots, grippers, and objects. # indices for joints in qpos, qvel # indices for grippers in qpos, qvel # indices for joint pos actuation, joint vel actuation, gripper actuation # IDs of sites for gripper visualization Sets 3d position of indicator object to @pos. Overrides the superclass method to actuate the robot with the passed joint velocities and gripper control. Args: action (numpy array): The control to apply to the robot. 
The first @self.mujoco_robot.dof dimensions should be the desired normalized joint velocities and if the robot has a gripper, the next @self.gripper.dof dimensions should be actuation controls for the gripper. # Make sure action length is correct # i.e.: not using new controller # clip actions into valid range # rescale normalized action to control ranges # gravity compensation # using new controller # Split action into joint control and peripheral (i.e.: gripper) control (as specified by individual gripper) # all indexes past controller dimension indexes # TODO # First, get joint space action # action = action.copy() # ensure that we don't change the action outside of this scope # this scales and clips the actions correctly # TODO # Get gripper action, if applicable # rescale normalized gripper action to control ranges # Now, control both gripper and joints # .copy()) # TODO # Assumes a ft sensor on the wrist # convert to matrix (Optional) does gripper visualization after actions. Returns an OrderedDict containing observations [(name_string, np.array), ...]. Important keys: robot-state: contains robot-centric information. # camera observations # Skip for now, not worth importing cv2 just for this # if self.visualize_offscreen and not self.real_robot: # cv2.imshow('Robot observation', np.flip(camera_obs[..., ::-1], 0)) # cv2.waitKey(10) # proprioceptive features # add in gripper information # Adding binary contact observation Action lower/upper limits per dimension. Returns the DoF of the robot (with grippers). A helper function that takes in a named data field and returns the pose of that object in the base frame. Helper method to force robot joint positions to the passed values. Returns the cartesian pose of the last robot joint in base frame of robot. Returns eef pose in base frame of robot. Returns eef quaternion in base frame of robot. Returns the total eef velocity (linear + angular) in the base frame as a numpy array of shape (6,) # Use jacobian to translate joint velocities to end effector velocities. Returns position of eef in base frame of robot. Returns orientation of eef in base frame of robot as a rotation matrix. Returns velocity of eef in base frame of robot. Returns angular velocity of eef in base frame of robot. Returns a numpy array of joint positions. Panda robots have 7 joints and positions are in rotation angles. Returns a numpy array of joint velocities. Panda robots have 7 joints and velocities are angular velocities. Do any needed visualization here. # By default, don't do any coloring. Returns True if the gripper is in contact with another object. Returns True if the arm is in contact with another object. Returns True if the arm is in joint limits or very close to. Returns the change in joint space configuration between previous and current steps Returns the change in joint space configuration between previous and current steps Returns the change in policy action between previous and current steps Returns the change in policy action between previous and current steps Returns the energy consumed by each joint between previous and current steps # Mean torque applied # We assume in the motors torque is proportional to current (and voltage is constant) # In that case the amount of power scales proportional to the torque and the energy is the # time integral of that Returns the integral over time of the applied ee force-torque | 2.336223 | 2 |
src/sqlizer/conversionstatus.py | sqlizer-io/sqlizer-client-py | 11 | 6632971 |
class ConversionStatus:
NotCreated = None
New = 'New'
Uploaded = 'Uploaded'
Queued = 'Queued'
Analyzing = 'Analyzing'
Processing = 'Processing'
Complete = 'Complete'
Failed = 'Failed'
SubscriptionRequired = 'SubscriptionRequired'
PaymentRequired = 'PaymentRequired' |
class ConversionStatus:
NotCreated = None
New = 'New'
Uploaded = 'Uploaded'
Queued = 'Queued'
Analyzing = 'Analyzing'
Processing = 'Processing'
Complete = 'Complete'
Failed = 'Failed'
SubscriptionRequired = 'SubscriptionRequired'
PaymentRequired = 'PaymentRequired' | none | 1 | 1.434139 | 1 |
|
s3/replication/common/src/s3replicationcommon/s3_put_object.py | rajkumarpatel2602/cortx-multisite | 1 | 6632972 |
#
# Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email <EMAIL> or <EMAIL>.
#
import aiohttp
import sys
from s3replicationcommon.aws_v4_signer import AWSV4Signer
from s3replicationcommon.log import fmt_reqid_log
from s3replicationcommon.s3_common import S3RequestState
from s3replicationcommon.timer import Timer
class S3AsyncPutObject:
def __init__(self, session, request_id,
bucket_name, object_name, object_size):
"""Initialise."""
self._session = session
# Request id for better logging.
self._request_id = request_id
self._logger = session.logger
self._bucket_name = bucket_name
self._object_name = object_name
self._object_size = object_size
self.remote_down = False
self._http_status = None
self._timer = Timer()
self._state = S3RequestState.INITIALISED
def get_state(self):
"""Returns current request state."""
return self._state
def get_response_header(self, header_key):
"""Returns response http header value."""
if self._state == S3RequestState.COMPLETED:
return self._response_headers[header_key]
return None
def get_execution_time(self):
"""Return total time for PUT Object operation."""
return self._timer.elapsed_time_ms()
def get_etag(self):
"""Returns ETag for object."""
return self._response_headers["ETag"].strip("\"")
# data_reader is an object with a fetch method that can yield data
async def send(self, data_reader, transfer_size):
self._state = S3RequestState.RUNNING
self._data_reader = data_reader
request_uri = AWSV4Signer.fmt_s3_request_uri(
self._bucket_name, self._object_name)
query_params = ""
body = ""
headers = AWSV4Signer(
self._session.endpoint,
self._session.service_name,
self._session.region,
self._session.access_key,
self._session.secret_key).prepare_signed_header(
'PUT',
request_uri,
query_params,
body)
if (headers['Authorization'] is None):
self._logger.error(fmt_reqid_log(self._request_id) +
"Failed to generate v4 signature")
sys.exit(-1)
headers["Content-Length"] = str(self._object_size)
self._logger.info(fmt_reqid_log(self._request_id) +
"PUT on {}".format(
self._session.endpoint + request_uri))
self._logger.debug(fmt_reqid_log(self._request_id) +
"PUT with headers {}".format(headers))
self._timer.start()
try:
async with self._session.get_client_session().put(
self._session.endpoint + request_uri,
headers=headers,
# Read all data from data_reader
data=data_reader.fetch(transfer_size)) as resp:
self._timer.stop()
if data_reader.get_state() != S3RequestState.ABORTED:
self._http_status = resp.status
self._response_headers = resp.headers
self._logger.info(
fmt_reqid_log(self._request_id) +
'PUT Object completed with http status: {}'.format(
resp.status))
# Validate if upload object etag matches.
if self.get_etag() != data_reader.get_etag():
self._state = S3RequestState.FAILED
error_msg = "ETag mismatch."
self._logger.error(
fmt_reqid_log(self._request_id) +
'Error Response: {}'.format(error_msg))
if resp.status == 200:
self._state = S3RequestState.COMPLETED
else:
error_msg = await resp.text()
self._logger.error(
fmt_reqid_log(self._request_id) +
'Error Response: {}'.format(error_msg))
self._state = S3RequestState.FAILED
except aiohttp.client_exceptions.ClientConnectorError as e:
self._timer.stop()
self.remote_down = True
self._state = S3RequestState.FAILED
self._logger.error(fmt_reqid_log(self._request_id) +
"Failed to connect to S3: " + str(e))
return
def pause(self):
self._state = S3RequestState.PAUSED
# XXX Take real pause action
def resume(self):
self._state = S3RequestState.PAUSED
# XXX Take real resume action
def abort(self):
self._state = S3RequestState.ABORTED
# Abort the reader so that PUT can stop.
self._data_reader.abort()
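# --- Editor's sketch (illustrative, not part of the original module) ---
# The Authorization header used by send() comes from AWSV4Signer. For
# reference only, the standard AWS Signature V4 signing-key derivation chains
# four HMAC-SHA256 steps; the credentials below are dummies and this is not
# the project's signer implementation.
import hashlib
import hmac
def _example_sigv4_signing_key(secret_key, date_stamp, region, service):
    def _sign(key, msg):
        return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
    k_date = _sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = _sign(k_date, region)
    k_service = _sign(k_region, service)
    return _sign(k_service, "aws4_request")
# e.g. _example_sigv4_signing_key("dummy-secret", "20210901", "us-east-1", "s3")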
| #
# Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email <EMAIL> or <EMAIL>.
#
import aiohttp
import sys
from s3replicationcommon.aws_v4_signer import AWSV4Signer
from s3replicationcommon.log import fmt_reqid_log
from s3replicationcommon.s3_common import S3RequestState
from s3replicationcommon.timer import Timer
class S3AsyncPutObject:
def __init__(self, session, request_id,
bucket_name, object_name, object_size):
"""Initialise."""
self._session = session
# Request id for better logging.
self._request_id = request_id
self._logger = session.logger
self._bucket_name = bucket_name
self._object_name = object_name
self._object_size = object_size
self.remote_down = False
self._http_status = None
self._timer = Timer()
self._state = S3RequestState.INITIALISED
def get_state(self):
"""Returns current request state."""
return self._state
def get_response_header(self, header_key):
"""Returns response http header value."""
if self._state == S3RequestState.COMPLETED:
return self._response_headers[header_key]
return None
def get_execution_time(self):
"""Return total time for PUT Object operation."""
return self._timer.elapsed_time_ms()
def get_etag(self):
"""Returns ETag for object."""
return self._response_headers["ETag"].strip("\"")
# data_reader is an object with a fetch method that can yield data
async def send(self, data_reader, transfer_size):
self._state = S3RequestState.RUNNING
self._data_reader = data_reader
request_uri = AWSV4Signer.fmt_s3_request_uri(
self._bucket_name, self._object_name)
query_params = ""
body = ""
headers = AWSV4Signer(
self._session.endpoint,
self._session.service_name,
self._session.region,
self._session.access_key,
self._session.secret_key).prepare_signed_header(
'PUT',
request_uri,
query_params,
body)
if (headers['Authorization'] is None):
self._logger.error(fmt_reqid_log(self._request_id) +
"Failed to generate v4 signature")
sys.exit(-1)
headers["Content-Length"] = str(self._object_size)
self._logger.info(fmt_reqid_log(self._request_id) +
"PUT on {}".format(
self._session.endpoint + request_uri))
self._logger.debug(fmt_reqid_log(self._request_id) +
"PUT with headers {}".format(headers))
self._timer.start()
try:
async with self._session.get_client_session().put(
self._session.endpoint + request_uri,
headers=headers,
# Read all data from data_reader
data=data_reader.fetch(transfer_size)) as resp:
self._timer.stop()
if data_reader.get_state() != S3RequestState.ABORTED:
self._http_status = resp.status
self._response_headers = resp.headers
self._logger.info(
fmt_reqid_log(self._request_id) +
'PUT Object completed with http status: {}'.format(
resp.status))
# Validate if upload object etag matches.
if self.get_etag() != data_reader.get_etag():
self._state = S3RequestState.FAILED
error_msg = "ETag mismatch."
self._logger.error(
fmt_reqid_log(self._request_id) +
'Error Response: {}'.format(error_msg))
if resp.status == 200:
self._state = S3RequestState.COMPLETED
else:
error_msg = await resp.text()
self._logger.error(
fmt_reqid_log(self._request_id) +
'Error Response: {}'.format(error_msg))
self._state = S3RequestState.FAILED
except aiohttp.client_exceptions.ClientConnectorError as e:
self._timer.stop()
self.remote_down = True
self._state = S3RequestState.FAILED
self._logger.error(fmt_reqid_log(self._request_id) +
"Failed to connect to S3: " + str(e))
return
def pause(self):
self._state = S3RequestState.PAUSED
# XXX Take real pause action
def resume(self):
self._state = S3RequestState.PAUSED
# XXX Take real resume action
def abort(self):
self._state = S3RequestState.ABORTED
# Abort the reader so that PUT can stop.
self._data_reader.abort() | en | 0.792629 | # # Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # For any questions about this software or licensing, # please email <EMAIL> or <EMAIL>. # Initialise. # Request id for better logging. Returns current request state. Returns response http header value. Return total time for PUT Object operation. Returns ETag for object. # data_reader is object with fetch method that can yeild data # Read all data from data_reader # Validate if upload object etag matches. # XXX Take real pause action # XXX Take real resume action # Abort the reader so that PUT can stop. | 1.645004 | 2 |
2DTFIM_2DRNN/run_2dTFIM.py | MatteoMartinelli97/RNNWavefunctions | 47 | 6632973 |
from Training2DRNN_2DTFIM import run_2DTFIM
#numsteps = number of training iterations
#systemsize_x = the size of the x-dimension of the square lattice
#systemsize_y = the size of the y-dimension of the square lattice
#Bx = transverse magnetic field
#numsamples = number of samples used for training
#num_units = number of memory units of the hidden state of the RNN
#num_layers is not supported yet, stay tuned!
RNNEnergy, varRNNEnergy = run_2DTFIM(numsteps = 2*10**4, systemsize_x = 4, systemsize_y = 4, Bx = 3, num_units = 50, numsamples = 500, learningrate = 5e-3, seed = 111)
#RNNEnergy is a numpy array of the variational energy of the pRNN wavefunction
#varRNNEnergy is a numpy array of the variance of the variational energy of the pRNN wavefunction
| from Training2DRNN_2DTFIM import run_2DTFIM
#numsteps = number of training iterations
#systemsize_x = the size of the x-dimension of the square lattice
#systemsize_y = the size of the y-dimension of the square lattice
#Bx = transverse magnetic field
#numsamples = number of samples used for training
#num_units = number of memory units of the hidden state of the RNN
#num_layers is not supported yet, stay tuned!
RNNEnergy, varRNNEnergy = run_2DTFIM(numsteps = 2*10**4, systemsize_x = 4, systemsize_y = 4, Bx = 3, num_units = 50, numsamples = 500, learningrate = 5e-3, seed = 111)
#RNNEnergy is a numpy array of the variational energy of the pRNN wavefunction
#varRNNEnergy is a numpy array of the variance of the variational energy of the pRNN wavefunction | en | 0.79471 | #numsteps = number of training iterations #systemsize_x = the size of the x-dimension of the square lattice #systemsize_x = the size of the y-dimension of the square lattice #Bx = transverse magnetic field #numsamples = number of samples used for training #num_units = number of memory units of the hidden state of the RNN #num_layers is not supported yet, stay tuned! #RNNEnergy is a numpy array of the variational energy of the pRNN wavefunction #varRNNEnergy is a numpy array of the variance of the variational energy of the pRNN wavefunction | 2.958517 | 3 |
books_scrapy/loaders.py | hdtls/books-scrapy | 0 | 6632974 | from books_scrapy.items import (
Manga,
Author,
MangaArea,
MangaCategory,
MangaChapter,
PHAsset,
)
from itemloaders.utils import arg_to_iter
from itemloaders.processors import Compose, Identity, MapCompose, TakeFirst
from scrapy.loader import ItemLoader
def splitting(value):
if not value:
return []
separator = None
if "," in value:
separator = ","
elif " " in value:
separator = " "
elif "x" in value:
separator = "x"
return list(map(lambda e: e.strip(), value.split(separator)))
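# --- Editor's sketch (illustrative, not part of the original module) ---
# Quick self-checks for splitting(): it returns [] for falsy input, prefers
# "," over " " over "x" as the separator, and strips every piece.
def _example_splitting():
    assert splitting(None) == []
    assert splitting("alice, bob") == ["alice", "bob"]
    assert splitting("1080x1920") == ["1080", "1920"]
    return True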
class MangaLoader(ItemLoader):
default_input_processor = MapCompose(str.strip)
default_output_processor = TakeFirst()
default_item_class = Manga
authors_in = MapCompose(splitting, str.strip, lambda name: Author(username=name))
authors_out = Identity()
area_in = MapCompose(str.strip, lambda name: MangaArea(name=name))
aliases_in = MapCompose(splitting, str.strip)
background_image_in = MapCompose(str.strip, lambda url: dict(ref_url=url))
categories_in = MapCompose(str.strip, lambda name: MangaCategory(name=name))
categories_out = Identity()
cover_image_in = background_image_in
promo_image_in = background_image_in
ref_urls_out = Identity()
schedule_in = MapCompose(lambda s: 1 if "完结" in s else 0)
class ChapterLoader(ItemLoader):
default_input_processor = MapCompose(str.strip)
default_output_processor = TakeFirst()
default_item_class = MangaChapter
ref_urls_out = Identity()
cover_image_in = MangaLoader.cover_image_in
assets_in = Compose(
lambda val: [
PHAsset(files=[dict(ref_url=url) for url in arg_to_iter(urls)])
for urls in [arg_to_iter(val)]
if urls
]
)
| from books_scrapy.items import (
Manga,
Author,
MangaArea,
MangaCategory,
MangaChapter,
PHAsset,
)
from itemloaders.utils import arg_to_iter
from itemloaders.processors import Compose, Identity, MapCompose, TakeFirst
from scrapy.loader import ItemLoader
def splitting(value):
if not value:
return []
separator = None
if "," in value:
separator = ","
elif " " in value:
separator = " "
elif "x" in value:
separator = "x"
return list(map(lambda e: e.strip(), value.split(separator)))
class MangaLoader(ItemLoader):
default_input_processor = MapCompose(str.strip)
default_output_processor = TakeFirst()
default_item_class = Manga
authors_in = MapCompose(splitting, str.strip, lambda name: Author(username=name))
authors_out = Identity()
area_in = MapCompose(str.strip, lambda name: MangaArea(name=name))
aliases_in = MapCompose(splitting, str.strip)
background_image_in = MapCompose(str.strip, lambda url: dict(ref_url=url))
categories_in = MapCompose(str.strip, lambda name: MangaCategory(name=name))
categories_out = Identity()
cover_image_in = background_image_in
promo_image_in = background_image_in
ref_urls_out = Identity()
schedule_in = MapCompose(lambda s: 1 if "完结" in s else 0)
class ChapterLoader(ItemLoader):
default_input_processor = MapCompose(str.strip)
default_output_processor = TakeFirst()
default_item_class = MangaChapter
ref_urls_out = Identity()
cover_image_in = MangaLoader.cover_image_in
assets_in = Compose(
lambda val: [
PHAsset(files=[dict(ref_url=url) for url in arg_to_iter(urls)])
for urls in [arg_to_iter(val)]
if urls
]
)
| none | 1 | 2.573682 | 3 |
|
Mini Projects/QuadraticSolver/QuadraticSolver.py | Snowystar122/Python-Projects | 0 | 6632975 | import math
# Generates real and complex solutions for a quadratic polynomial
def solution(var_a, var_b, to_root):
if to_root > 0:
sol_1 = (-1 * var_b + math.sqrt(to_root)) / (2 * var_a)
sol_2 = (-1 * var_b - math.sqrt(to_root)) / (2 * var_a)
return f"The solutions are:\n{sol_1}\n{sol_2}"
elif to_root == 0:
sol_1 = (-1 * var_b + math.sqrt(to_root)) / (2 * var_a)
return f"The solution to this quadratic equation is:\n{sol_1}"
else:
real_coefficient = (-1 * var_b) / (2 * var_a)
complex_coefficient_1 = math.sqrt(-1 * to_root) / (2 * var_a)
complex_coefficient_2 = -(1 * math.sqrt(-1 * to_root)) / (2 * var_a)
sol_1 = complex(real_coefficient, complex_coefficient_1)
sol_2 = complex(real_coefficient, complex_coefficient_2)
return f"The solutions are:\n{sol_1}\n{sol_2}"
print("Welcome to this polynomial solver. Please place in the coefficients of your quadratic where appropriate.\n"
"This is in the format of ax^2 + bx + c = 0. Enter the coefficients as required below.")
# Coefficients of the polynomial
a = input("a:")
b = input("b:")
c = input("c:")
check_numeric = all((a.isnumeric(), b.isnumeric(), c.isnumeric()))
if check_numeric:
a, b, c = float(a), float(b), float(c)
root_sol = b ** 2 - 4 * a * c
print(solution(a, b, root_sol))
else:
print("You can only insert numbers.")
| import math
# Generates real and complex solutions for a quadratic polynomial
def solution(var_a, var_b, to_root):
if to_root > 0:
sol_1 = (-1 * var_b + math.sqrt(to_root)) / (2 * var_a)
sol_2 = (-1 * var_b - math.sqrt(to_root)) / (2 * var_a)
return f"The solutions are:\n{sol_1}\n{sol_2}"
elif to_root == 0:
sol_1 = (-1 * var_b + math.sqrt(to_root)) / (2 * var_a)
return f"The solution to this quadratic equation is:\n{sol_1}"
else:
real_coefficient = (-1 * var_b) / (2 * var_a)
complex_coefficient_1 = math.sqrt(-1 * to_root) / (2 * var_a)
complex_coefficient_2 = -(1 * math.sqrt(-1 * to_root)) / (2 * var_a)
sol_1 = complex(real_coefficient, complex_coefficient_1)
sol_2 = complex(real_coefficient, complex_coefficient_2)
return f"The solutions are:\n{sol_1}\n{sol_2}"
print("Welcome to this polynomial solver. Please place in the coefficients of your quadratic where appropriate.\n"
"This is in the format of ax^2 + bx + c = 0. Enter the coefficients as required below.")
# Coefficients of the polynomial
a = input("a:")
b = input("b:")
c = input("c:")
check_numeric = all((a.isnumeric(), b.isnumeric(), c.isnumeric()))
if check_numeric:
a, b, c = float(a), float(b), float(c)
root_sol = b ** 2 - 4 * a * c
print(solution(a, b, root_sol))
else:
print("You can only insert numbers.")
| en | 0.81215 | # Generates real and complex solutions for a quadratic polynomial # Coefficients of the polynomial | 4.10967 | 4 |
arguments/user_init.py | JedBurke/Rename-py | 0 | 6632976 | import argparse
import os
from helpers.user import UserHelpers
class InitializeUserConfig(argparse.Action):
"""docstring for InitializeUserConfig"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(InitializeUserConfig, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
user_dir = UserHelpers.get_user_directory()
if user_dir.exists():
print(f"User directory already exists at:\n { user_dir }")
| import argparse
import os
from helpers.user import UserHelpers
class InitializeUserConfig(argparse.Action):
"""docstring for InitializeUserConfig"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(InitializeUserConfig, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
user_dir = UserHelpers.get_user_directory()
if user_dir.exists():
print(f"User directory already exists at:\n { user_dir }")
| en | 0.387629 | docstring for InitializeUserConfig | 2.892826 | 3 |
tests/test_datastore.py | pyeventsourcing/eventsourcing-sqlalchemy | 13 | 6632977 |
# -*- coding: utf-8 -*-
from unittest import TestCase
from sqlalchemy.future import create_engine
from sqlalchemy.orm import sessionmaker
from eventsourcing_sqlalchemy.datastore import SQLAlchemyDatastore
class TestDatastore(TestCase):
def test_should_be_created_with_url(self) -> None:
datastore = SQLAlchemyDatastore(url="sqlite:///:memory:")
self.assertIsInstance(datastore, SQLAlchemyDatastore)
def test_should_be_created_with_session_cls(self) -> None:
session_cls = sessionmaker(bind=create_engine(url="sqlite:///:memory:"))
datastore = SQLAlchemyDatastore(session_cls=session_cls)
self.assertIsInstance(datastore, SQLAlchemyDatastore)
def test_should_raise_exception_without_url_or_session_cls(self) -> None:
with self.assertRaises(EnvironmentError):
SQLAlchemyDatastore()
| # -*- coding: utf-8 -*-
from unittest import TestCase
from sqlalchemy.future import create_engine
from sqlalchemy.orm import sessionmaker
from eventsourcing_sqlalchemy.datastore import SQLAlchemyDatastore
class TestDatastore(TestCase):
def test_should_be_created_with_url(self) -> None:
datastore = SQLAlchemyDatastore(url="sqlite:///:memory:")
self.assertIsInstance(datastore, SQLAlchemyDatastore)
def test_should_be_created_with_session_cls(self) -> None:
session_cls = sessionmaker(bind=create_engine(url="sqlite:///:memory:"))
datastore = SQLAlchemyDatastore(session_cls=session_cls)
self.assertIsInstance(datastore, SQLAlchemyDatastore)
def test_should_raise_exception_without_url_or_session_cls(self) -> None:
with self.assertRaises(EnvironmentError):
SQLAlchemyDatastore() | en | 0.769321 | # -*- coding: utf-8 -*- | 2.624546 | 3 |
tests/test_pedpedspace_interaction.py | Femme-js/PySocialForceJ | 42 | 6632978 |
import numpy as np
import pysocialforce as psf
def test_r_aB():
state = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 0.0, 1.0, 1.0],])
obstacles = [np.array([[0.0, 100.0], [0.0, 0.5]])]
r_aB = psf.PedSpacePotential(obstacles).r_aB(state)
assert r_aB.tolist() == [
[[0.0, -0.5]],
[[1.0, -0.5]],
]
|
import numpy as np
import pysocialforce as psf
def test_r_aB():
state = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 0.0, 1.0, 1.0],])
obstacles = [np.array([[0.0, 100.0], [0.0, 0.5]])]
r_aB = psf.PedSpacePotential(obstacles).r_aB(state)
assert r_aB.tolist() == [
[[0.0, -0.5]],
[[1.0, -0.5]],
]
| none | 1 | 2.397244 | 2 |
|
third_party/spider/baselines/seq2seq_attention_copy/seq2seq/models/attention_seq2seq.py | chenyangh/tensor2struct-public | 69 | 6632979 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequence to Sequence model with attention
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq import decoders
from seq2seq.models.basic_seq2seq import BasicSeq2Seq
class AttentionSeq2Seq(BasicSeq2Seq):
"""Sequence2Sequence model with attention mechanism.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="att_seq2seq"):
super(AttentionSeq2Seq, self).__init__(params, mode, name)
@staticmethod
def default_params():
params = BasicSeq2Seq.default_params().copy()
params.update({
"attention.class": "AttentionLayerBahdanau",
"attention.params": {"num_units": 150},
"bridge.class": "seq2seq.models.bridges.ZeroBridge",
"encoder.class": "seq2seq.encoders.BidirectionalRNNEncoder",
"encoder.params": {"rnn_cell": {"cell_class": "LSTMCell",
"cell_params":
{"num_units": 150},
"dropout_input_keep_prob": 0.5,
"dropout_output_keep_prob": 0.5,
"num_layers": 1}},
"decoder.class": "seq2seq.decoders.AttentionDecoder",
"decoder.params": {"max_decode_length": 250,
"rnn_cell": {"cell_class": "LSTMCell",
"cell_params":
{"num_units": 150},
"dropout_input_keep_prob": 0.5,
"dropout_output_keep_prob": 0.5,
"num_layers": 1}},
"optimizer.name": "Adam",
"optimizer.params": {"epsilon": 0.0000008},
"optimizer.learning_rate": 0.0005,
"source.max_seq_len": 50,
"source.reverse": False,
"target.max_seq_len": 250,
})
return params
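# --- Editor's sketch (illustrative, not part of the original model) ---
# The config above selects AttentionLayerBahdanau. As a reference for the
# additive-attention idea only (not this repo's exact implementation):
# score_t = v . tanh(W_k k_t + W_q q), softmax over t, context = sum_t w_t v_t.
# All sizes and weights below are made up.
def _example_additive_attention():
    import numpy as np
    rng = np.random.RandomState(0)
    T, d = 5, 4
    keys, values = rng.randn(T, d), rng.randn(T, d)
    query = rng.randn(d)
    W_k, W_q, v = rng.randn(d, d), rng.randn(d, d), rng.randn(d)
    scores = np.tanh(keys @ W_k + query @ W_q) @ v
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()
    context = weights @ values
    assert np.isclose(weights.sum(), 1.0) and context.shape == (d,)
    return context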
def _create_decoder(self, encoder_output, features, _labels):
attention_class = locate(self.params["attention.class"]) or \
getattr(decoders.attention, self.params["attention.class"])
attention_layer = attention_class(
params=self.params["attention.params"], mode=self.mode)
# If the input sequence is reversed we also need to reverse
# the attention scores.
reverse_scores_lengths = None
if self.params["source.reverse"]:
reverse_scores_lengths = features["source_len"]
if self.use_beam_search:
reverse_scores_lengths = tf.tile(
input=reverse_scores_lengths,
multiples=[self.params["inference.beam_search.beam_width"]])
decoder_mask = features["decoder_mask"]
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size,
attention_values=encoder_output.attention_values,
attention_values_length=encoder_output.attention_values_length,
attention_keys=encoder_output.outputs,
attention_fn=attention_layer,
reverse_scores_lengths=reverse_scores_lengths,
decoder_mask = decoder_mask)
| # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequence to Sequence model with attention
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq import decoders
from seq2seq.models.basic_seq2seq import BasicSeq2Seq
class AttentionSeq2Seq(BasicSeq2Seq):
"""Sequence2Sequence model with attention mechanism.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="att_seq2seq"):
super(AttentionSeq2Seq, self).__init__(params, mode, name)
@staticmethod
def default_params():
params = BasicSeq2Seq.default_params().copy()
params.update({
"attention.class": "AttentionLayerBahdanau",
"attention.params": {"num_units": 150},
"bridge.class": "seq2seq.models.bridges.ZeroBridge",
"encoder.class": "seq2seq.encoders.BidirectionalRNNEncoder",
"encoder.params": {"rnn_cell": {"cell_class": "LSTMCell",
"cell_params":
{"num_units": 150},
"dropout_input_keep_prob": 0.5,
"dropout_output_keep_prob": 0.5,
"num_layers": 1}},
"decoder.class": "seq2seq.decoders.AttentionDecoder",
"decoder.params": {"max_decode_length": 250,
"rnn_cell": {"cell_class": "LSTMCell",
"cell_params":
{"num_units": 150},
"dropout_input_keep_prob": 0.5,
"dropout_output_keep_prob": 0.5,
"num_layers": 1}},
"optimizer.name": "Adam",
"optimizer.params": {"epsilon": 0.0000008},
"optimizer.learning_rate": 0.0005,
"source.max_seq_len": 50,
"source.reverse": False,
"target.max_seq_len": 250,
})
return params
def _create_decoder(self, encoder_output, features, _labels):
attention_class = locate(self.params["attention.class"]) or \
getattr(decoders.attention, self.params["attention.class"])
attention_layer = attention_class(
params=self.params["attention.params"], mode=self.mode)
# If the input sequence is reversed we also need to reverse
# the attention scores.
reverse_scores_lengths = None
if self.params["source.reverse"]:
reverse_scores_lengths = features["source_len"]
if self.use_beam_search:
reverse_scores_lengths = tf.tile(
input=reverse_scores_lengths,
multiples=[self.params["inference.beam_search.beam_width"]])
decoder_mask = features["decoder_mask"]
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size,
attention_values=encoder_output.attention_values,
attention_values_length=encoder_output.attention_values_length,
attention_keys=encoder_output.outputs,
attention_fn=attention_layer,
reverse_scores_lengths=reverse_scores_lengths,
decoder_mask = decoder_mask)
| en | 0.812775 | # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Sequence to Sequence model with attention Sequence2Sequence model with attention mechanism. Args: source_vocab_info: An instance of `VocabInfo` for the source vocabulary target_vocab_info: An instance of `VocabInfo` for the target vocabulary params: A dictionary of hyperparameters # If the input sequence is reversed we also need to reverse # the attention scores. | 2.079861 | 2 |
Entities/RBAC.py | srinibasmisra97/OAuth-Authorization-Server | 0 | 6632980 | from Utils.DBOperations import Read, Update
from Entities.Clients import Clients
import json, uuid
db_obj = None
COL_NAME = 'applications'
def db_init():
"""
This function checks for an existing MongoDB connection object and creates one if needed.
:return: MongoDB connection object.
"""
global db_obj
if db_obj is None:
from main import MONGO_HOST, MONGO_PORT, MONGO_USERNAME, MONGO_PASSWORD, MONGO_DB
from Utils.MongoHandler import ConnectDB
db_obj = ConnectDB(host=MONGO_HOST, port=MONGO_PORT, username=MONGO_USERNAME, password=MONGO_PASSWORD,
db=MONGO_DB).getMongoDbObject()
return db_obj
class Permission(object):
def __init__(self, name="", value=""):
"""
Init method for a permission.
:param name: Name of the permission.
:param value: Permission string. Should be unique.
"""
self.name = name
self.value = value
def get(self, application, permission=""):
"""
This function returns a permission document.
:param application: Application.
:param permission: Value of the permission.
:return: Dictionary.
"""
db_obj = db_init()
if permission == "":
permission = self.value
condition = {
"permissions.value": permission
}
result = Read().find_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition)
for app in result:
if app['api'] == application.api:
for perm in app['permissions']:
if permission == perm['value']:
return perm
return {}
def add(self, client, application, name="", value=""):
"""
This function adds a permission for a specific app.
:param client: Client entity object for the application client.
:param application: Application.
:param name: Name of the permission.
:param value: String value of the permission. Should be unique.
:return: Added permission.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if name == "":
name = self.name
if value == "":
value = self.value
if self.get(application=application, permission=value):
return None, "existing permission"
condition = {
"api": application.api
}
data = {
"$push": {
"permissions": {
"name": name,
"value": value
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def add_many(self, client, application, permissions):
"""
Add multiple permissions.
:param client: Client entity object.
:param application: Application.
:param permissions: Permissions array.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
existing = application.permissions
common = []
for value in existing:
for p in permissions:
if value['value'] == p['value']:
common.append(value)
if len(common) != 0:
return None, "existing"
condition = {
"api": application.api
}
data = {
"$push": {
"permissions": {
"$each": permissions
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def remove(self, client, application, permission=""):
"""
This function removes a permission for an application.
:param client: Client entities object.
:param application: Application.
:param permission: Permission string to remove.
:return: Delete Object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if permission == "":
permission = self.value
condition = {
"api": application.api
}
data = {
"$pull": {
"permissions": {
"value": permission
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition)
return result, "removed" if result else "fail"
def update_name(self, client, application, name, permission=""):
"""
This function updates the name of the permission.
:param client: Client entities object.
:param application: Application.
:param name: Name value to update.
:param permission: Permission value.
:return: Update result object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if permission == "":
permission = self.value
condition = {
"api": application.api,
"permissions.value": permission
}
data = {
"$set": {
"permissions.$[permission].name": name
}
}
array_filters = [{"permission.value": permission}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition,
array_filters=array_filters)
return result, "updated" if result else "failed"
def update_value(self, client, application, new_value, old_value=""):
"""
This function updates the value of the permission.
:param client: Client entities object.
:param application: Application.
:param new_value: New value to set.
:param old_value: Old value to look for.
:return: Result object
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if new_value in application.permissions:
return None, "existing"
if old_value == "":
old_value = self.value
condition = {
"api": application.api,
"permissions.value": old_value
}
data = {
"$set": {"permissions.$[permission].value": new_value}
}
array_filters = [{"permission.value": old_value}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition,
array_filters=array_filters)
return result, "updated" if result else "failed"
class Role(object):
def __init__(self, name="", id="", permissions=[]):
"""
Init function for role object.
:param name: Name of the role.
:param id: Unique id of the role.
:param permissions: List of permissions.
"""
self.name = name
self.id = id
self.permissions = permissions
def setattr(self, doc):
"""
This function is used to set attributes for the Role object.
:param doc: Role document from db.
"""
if "name" in doc:
self.name = doc['name']
if "id" in doc:
self.id = doc['id']
if "permissions" in doc:
self.permissions = doc['permissions']
def get(self, application, client=None, role_id=""):
"""
Gets a single role of the application by its id.
:param client: Client object.
:param application: Application object.
:param role_id: Role id.
:return: Dictionary.
"""
db_obj = db_init()
if client is not None:
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {'api': application.api, 'roles.id': role_id}
result = Read().find_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition)
for app in result:
if app['api'] == application.api:
for role in app['roles']:
if role_id == role['id']:
self.setattr(doc=role)
return role
return {}
def add(self, client, application, name="", role_id="", permissions=[]):
"""
Adds a role for the application.
:param client: Client object.
:param application: Application object.
:param name: Role name.
:param role_id: Role id.
:param permissions: Permissions list.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if name == "":
name = self.name
if role_id == "":
role_id = self.id
if not permissions:
permissions = self.permissions
condition = {'api': application.api}
data = {"$push": {"roles": {"name": name, "id": role_id, "permissions": permissions}}}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data)
return result, "updated" if result else "failed"
def add_many(self, client, application, roles):
"""
Adds multiple roles for the application.
:param client: Client object.
:param application: Application object.
:param roles: Roles array.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
existing = application.roles
common = []
for value in existing:
for role in roles:
if value['id'] == role['id']:
common.append(value)
if len(common) != 0:
return None, "existing"
condition = {
"api": application.api
}
data = {
"$push": {
"roles": {
"$each": roles
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def update_name(self, client, application, name, role_id=""):
"""
Updates the name of the role.
:param client: Client object.
:param application: Application Object.
:param name: New name.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {
"api": application.api,
"roles.id": role_id
}
data = {
"$set": {
"roles.$[role].name": name
}
}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition,
array_filters=array_filters)
return result, "updated" if result else "failed"
def update_permissions(self, client, application, permissions, role_id=""):
"""
This function sets the new set of permissions for a role.
:param client: Client object.
:param application: Application object.
:param permissions: Permissions array.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {
"api": application.api,
"roles.id": role_id
}
data = {"$set": {"roles.$[role].permissions": permissions}}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data,
array_filters=array_filters)
return result, "removed" if result else "failed"
def add_permissions(self, client, application, permissions, role_id=""):
"""
Add permissions for a role.
:param client: Client object.
:param application: Application object.
:param permissions: Permissions list.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
for p in permissions:
if not any(perm.get('value') == p for perm in application.permissions):
return None, "permission not defined"
condition = {
"api": application.api,
"roles.id": role_id
}
data = {"$push": {"roles.$[role].permissions": {"$each": permissions}}}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data,
array_filters=array_filters)
return result, "updated" if result else "failed"
def remove_permissions(self, client, application, permissions, role_id=""):
"""
Remove permissions for a role.
:param client: Client object.
:param application: Application object.
:param permissions: Permissions to remove.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
to_remove = []
for p in permissions:
if any(perm.get('value') == p for perm in application.permissions):
to_remove.append(p)
condition = {
"api": application.api,
"roles.id": role_id
}
data = {"$set": {"roles.$[role].permissions": to_remove}}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data,
array_filters=array_filters)
return result, "removed" if result else "failed"
def delete(self, client, application, role_id=""):
"""
This function deletes a role for an application.
:param client: Client object.
:param application: Application object.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {
'api': application.api,
'roles.id': role_id
}
data = {
'$pull': {'roles': {'id': role_id}}
}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, data=data, condition=condition)
return result, "removed" if result else "failed"
class User(object):
def __init__(self, id_="", email="", name="", role=""):
"""
Init function for creating a member object.
:param id_: Unique id of the user.
:param email: Email id.
:param name: Name
:param role: Role id.
"""
self.id_ = id_
self.email = email
self.name = name
self.role = role
def __str__(self):
"""
Returns a string of the user object.
:return: String.
"""
return json.dumps({'email': self.email, 'name': self.name, 'role': self.role, 'id_': self.id_})
def json(self):
"""
Returns a json object.
:return: JSON.
"""
return {'email': self.email, 'name': self.name, 'role': self.role, 'id_': self.id_}
def setattr(self, doc):
"""
Set object attributes from document.
:param doc: Document.
"""
if "id_" in doc:
self.id_ = doc['id_']
if "email" in doc:
self.email = doc['email']
if "name" in doc:
self.name = doc['name']
if "role" in doc:
self.role = doc['role']
def get_by_id(self, application, client=None, id_=""):
"""
Get a user by its id.
:param application: Application object.
:param client: Client object.
:param id_: Id to look for.
:return: Document
"""
db_obj = db_init()
if client is not None:
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if id_ == "":
id_ = self.id_
condition = {'api': application.api, 'users.id_': id_}
result = Read().find_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition)
for app in result:
for user in app['users']:
if user['id_'] == id_:
self.setattr(user)
return user
return {}
def get_by_email(self, application, client=None, email=""):
"""
Get a user by its email.
:param client: Client object.
:param application: Application object.
:param email: Email id.
:return: Document.
"""
db_obj = db_init()
if client is not None:
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
condition = {'api': application.api, 'users.email': email}
result = Read().find_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition)
for app in result:
for user in app['users']:
if user['email'] == email:
self.setattr(user)
return user
return {}
def add(self, client, application, email="", role="", name=""):
"""
Adding a single user for an application.
:param client: Client object.
:param application: Application object.
:param email: Email id.
:param role: Role.
:param name: Name
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
if role == "":
role = self.role
if name == "":
name = self.name
self.id_ = str(uuid.uuid1().hex)
condition = {'api': application.api}
data = {
'$push': {
'users': {
'id_': self.id_,
'email': email,
'name': name,
'role': role
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, data=data, condition=condition)
return result, "updated" if result else "failed"
def add_many(self, client, application, users=[]):
"""
Add multiple users to an app at once.
:param client: Client object.
:param application: Application object.
:param users: Users array.
:return:
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
existing = application.users
common = []
for user in users:
user['id_'] = str(uuid.uuid1().hex)
for value in existing:
for user in users:
if value['email'] == user['email']:
common.append(value)
if len(common) != 0:
return None, "existing"
condition = {
"api": application.api
}
data = {
"$push": {
"users": {
"$each": users
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def remove(self, client, application, email=""):
"""
Removes a user from the application.
:param client: Client object.
:param application: Application object.
:param email: Email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
condition = {'api': application.api, 'users.email': email}
data = {'$pull': {'users': {'email': email}}}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data)
return result, "removed" if result else "failed"
def update_email(self, client, application, new_email="", old_email=""):
"""
Updates the email address of a user.
:param client: Client object.
:param application: Application object.
:param new_email: New email id.
:param old_email: Old email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if old_email == "":
old_email = self.email
existing = application.users
if any(user.get('email') == new_email for user in existing):
return None, "existing"
condition = {'api': application.api, 'users.email': old_email}
data = {'$set': {'users.$[user].email': new_email}}
array_filters = [{'user.email': old_email}]
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data,
array_filters=array_filters)
return result, "update" if result else "failed"
def update_name(self, client, application, name, email=""):
"""
Update name of user.
:param client: Client object.
:param application: Application object.
:param name: Name.
:param email: Email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
condition = {'api': application.api, 'users.email': email}
data = {'$set': {'users.$[user].name': name}}
array_filters = [{'user.email': email}]
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data,
array_filters=array_filters)
return result, "update" if result else "failed"
def update_role(self, client, application, role, email=""):
"""
Updates the role of a user.
:param client: Client object.
:param application: Application object.
:param role: New role.
:param email: Email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
existing = application.roles
found = False
for er in existing:
if role == er['id']:
found = True
if not found:
return None, "role not defined"
condition = {'api': application.api, 'users.email': email}
data = {'$set': {'users.$[user].role': role}}
array_filters = [{'user.email': email}]
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data,
array_filters=array_filters)
return result, "update" if result else "failed"
| from Utils.DBOperations import Read, Update
from Entities.Clients import Clients
import json, uuid
db_obj = None
COL_NAME = 'applications'
def db_init():
"""
This function checks the mongodb connections object.
:return: Mongodb connections object.
"""
global db_obj
if db_obj is None:
from main import MONGO_HOST, MONGO_PORT, MONGO_USERNAME, MONGO_PASSWORD, MONGO_DB
from Utils.MongoHandler import ConnectDB
db_obj = ConnectDB(host=MONGO_HOST, port=MONGO_PORT, username=MONGO_USERNAME, password=MONGO_PASSWORD,
db=MONGO_DB).getMongoDbObject()
return db_obj
class Permission(object):
def __init__(self, name="", value=""):
"""
Init method for a permission.
:param name: Name of the permission.
:param value: Permission string. Should be unique.
"""
self.name = name
self.value = value
def get(self, application, permission=""):
"""
This function returns a permission document.
:param application: Application.
:param permission: Value of the permission.
:return: Dictionary.
"""
db_obj = db_init()
if permission == "":
permission = self.value
condition = {
"permissions.value": permission
}
result = Read().find_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition)
for app in result:
if app['api'] == application.api:
for perm in app['permissions']:
if permission == perm['value']:
return perm
return {}
def add(self, client, application, name="", value=""):
"""
This function adds a permission for a specific app.
:param client: Client entity object for the application client.
:param application: Application.
:param name: Name of the permission.
:param value: String value of the permission. Should be unique.
:return: Added permission.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if name == "":
name = self.name
if value == "":
value = self.value
if self.get(application=application, permission=value):
return None, "existing permission"
condition = {
"api": application.api
}
data = {
"$push": {
"permissions": {
"name": name,
"value": value
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def add_many(self, client, application, permissions):
"""
Add multiple permissions.
:param client: Client entity object.
:param application: Application.
:param permissions: Permissions array.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
existing = application.permissions
common = []
for value in existing:
for p in permissions:
if value['value'] == p['value']:
common.append(value)
if len(common) != 0:
return None, "existing"
condition = {
"api": application.api
}
data = {
"$push": {
"permissions": {
"$each": permissions
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def remove(self, client, application, permission=""):
"""
This function removes a permission for an application.
:param client: Client entities object.
:param application: Application.
:param permission: Permission string to remove.
:return: Delete Object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if permission == "":
permission = self.value
condition = {
"api": application.api
}
data = {
"$pull": {
"permissions": {
"value": permission
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition)
return result, "removed" if result else "fail"
def update_name(self, client, application, name, permission=""):
"""
This function updates the name of the permission.
:param client: Client entities object.
:param application: Application.
:param name: Name value to update.
:param permission: Permission value.
:return: Update result object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if permission == "":
permission = self.value
condition = {
"api": application.api,
"permissions.value": permission
}
data = {
"$set": {
"permissions.$[permission].name": name
}
}
array_filters = [{"permission.value": permission}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition,
array_filters=array_filters)
return result, "updated" if result else "failed"
def update_value(self, client, application, new_value, old_value=""):
"""
This function updates the value of the permission.
:param client: Client entities object.
:param application: Application.
:param new_value: New value to set.
:param old_value: Old value to look for.
:return: Result object
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if any(perm.get('value') == new_value for perm in application.permissions):
return None, "existing"
if old_value == "":
old_value = self.value
condition = {
"api": application.api,
"permissions.value": old_value
}
data = {
"$set": {"permissions.$[permission].value": new_value}
}
array_filters = [{"permission.value": old_value}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition,
array_filters=array_filters)
return result, "updated" if result else "failed"
class Role(object):
def __init__(self, name="", id="", permissions=[]):
"""
Init function for role object.
:param name: Name of the role.
:param id: Unique id of the role.
:param permissions: List of permissions.
"""
self.name = name
self.id = id
self.permissions = permissions
def setattr(self, doc):
"""
This function is used to set attributes for the Role object.
:param doc: Role document from db.
"""
if "name" in doc:
self.name = doc['name']
if "id" in doc:
self.id = doc['id']
if "permissions" in doc:
self.permissions = doc['permissions']
def get(self, application, client=None, role_id=""):
"""
Gets the roles for the application.
:param client: Client object.
:param application: Application object.
:param role_id: Role id.
:return: List.
"""
db_obj = db_init()
if client is not None:
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {'api': application.api, 'roles.id': role_id}
result = Read().find_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition)
for app in result:
if app['api'] == application.api:
for role in app['roles']:
if role_id == role['id']:
self.setattr(doc=role)
return role
return {}
def add(self, client, application, name="", role_id="", permissions=[]):
"""
Adds a role for the application.
:param client: Client object.
:param application: Application object.
:param name: Role name.
:param role_id: Role id.
:param permissions: Permissions list.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if name == "":
name = self.name
if role_id == "":
role_id = self.id
if not permissions:
permissions = self.permissions
condition = {'api': application.api}
data = {"$push": {"roles": {"name": name, "id": role_id, "permissions": permissions}}}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data)
return result, "updated" if result else "failed"
def add_many(self, client, application, roles):
"""
Adds multiple roles for the application.
:param client: Client object.
:param application: Application object.
:param roles: Roles array.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
existing = application.roles
common = []
for value in existing:
for role in roles:
if value['id'] == role['id']:
common.append(value)
if len(common) != 0:
return None, "existing"
condition = {
"api": application.api
}
data = {
"$push": {
"roles": {
"$each": roles
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def update_name(self, client, application, name, role_id=""):
"""
Updates the name of the role.
:param client: Client object.
:param application: Application Object.
:param name: New name.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {
"api": application.api,
"roles.id": role_id
}
data = {
"$set": {
"roles.$[role].name": name
}
}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
data=data,
condition=condition,
array_filters=array_filters)
return result, "updated" if result else "failed"
def update_permissions(self, client, application, permissions, role_id=""):
"""
This function sets the new set of permissions for a role.
:param client: Client object.
:param application: Application object.
:param permissions: Permissions array.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {
"api": application.api,
"roles.id": role_id
}
data = {"$set": {"roles.$[role].permissions": permissions}}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data,
array_filters=array_filters)
return result, "removed" if result else "failed"
def add_permissions(self, client, application, permissions, role_id=""):
"""
Add permissions for a role.
:param client: Client object.
:param application: Application object.
:param permissions: Permissions list.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
for p in permissions:
if not any(perm.get('value') == p for perm in application.permissions):
return None, "permission not defined"
condition = {
"api": application.api,
"roles.id": role_id
}
data = {"$push": {"roles.$[role].permissions": {"$each": permissions}}}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data,
array_filters=array_filters)
return result, "updated" if result else "failed"
def remove_permissions(self, client, application, permissions, role_id=""):
"""
Remove permissions for a role.
:param client: Client object.
:param application: Application object.
:param permissions: Permissions to remove.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
to_remove = []
for p in permissions:
if any(perm.get('value') == p for perm in application.permissions):
to_remove.append(p)
condition = {
"api": application.api,
"roles.id": role_id
}
data = {"$set": {"roles.$[role].permissions": to_remove}}
array_filters = [{"role.id": role_id}]
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data,
array_filters=array_filters)
return result, "removed" if result else "failed"
def delete(self, client, application, role_id=""):
"""
This function deletes a role for an application.
:param client: Client object.
:param application: Application object.
:param role_id: Role id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if role_id == "":
role_id = self.id
condition = {
'api': application.api,
'roles.id': role_id
}
data = {
'$pull': {'roles': {'id': role_id}}
}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, data=data, condition=condition)
return result, "removed" if result else "failed"
class User(object):
def __init__(self, id_="", email="", name="", role=""):
"""
Init function for creating a member object.
:param id_: Unique id of the user.
:param email: Email id.
:param name: Name
:param role: Role id.
"""
self.id_ = id_
self.email = email
self.name = name
self.role = role
def __str__(self):
"""
Returns a string of the user object.
:return: String.
"""
return json.dumps({'email': self.email, 'name': self.name, 'role': self.role, 'id_': self.id_})
def json(self):
"""
Returns a json object.
:return: JSON.
"""
return {'email': self.email, 'name': self.name, 'role': self.role, 'id_': self.id_}
def setattr(self, doc):
"""
Set object attributes from document.
:param doc: Document.
"""
if "id_" in doc:
self.id_ = doc['id_']
if "email" in doc:
self.email = doc['email']
if "name" in doc:
self.name = doc['name']
if "role" in doc:
self.role = doc['role']
def get_by_id(self, application, client=None, id_=""):
"""
Get a user by its id.
:param application: Application object.
:param client: Client object.
:param id_: Id to look for.
:return: Document
"""
db_obj = db_init()
if client is not None:
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if id_ == "":
id_ = self.id_
condition = {'api': application.api, 'users.id_': id_}
result = Read().find_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition)
for app in result:
for user in app['users']:
if user['id_'] == id_:
self.setattr(user)
return user
return {}
def get_by_email(self, application, client=None, email=""):
"""
Get a user by its email.
:param client: Client object.
:param application: Application object.
:param email: Email id.
:return: Document.
"""
db_obj = db_init()
if client is not None:
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
condition = {'api': application.api, 'users.email': email}
result = Read().find_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition)
for app in result:
for user in app['users']:
if user['email'] == email:
self.setattr(user)
return user
return {}
def add(self, client, application, email="", role="", name=""):
"""
Adding a single user for an application.
:param client: Client object.
:param application: Application object.
:param email: Email id.
:param role: Role.
:param name: Name
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
if role == "":
role = self.role
if name == "":
name = self.name
self.id_ = str(uuid.uuid1().hex)
condition = {'api': application.api}
data = {
'$push': {
'users': {
'id_': self.id_,
'email': email,
'name': name,
'role': role
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, data=data, condition=condition)
return result, "updated" if result else "failed"
def add_many(self, client, application, users=[]):
"""
Add multiple users to an app at once.
:param client: Client object.
:param application: Application object.
:param users: Users array.
:return:
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
existing = application.users
common = []
for user in users:
user['id_'] = str(uuid.uuid1().hex)
for value in existing:
for user in users:
if value['email'] == user['email']:
common.append(value)
if len(common) != 0:
return None, "existing"
condition = {
"api": application.api
}
data = {
"$push": {
"users": {
"$each": users
}
}
}
result = Update().update_one_by_condition(db_obj=db_obj,
collection=COL_NAME,
condition=condition,
data=data)
return result, "updated" if result else "fail"
def remove(self, client, application, email=""):
"""
Removes a user from the application.
:param client: Client object.
:param application: Application object.
:param email: Email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
condition = {'api': application.api, 'users.email': email}
data = {'$pull': {'users': {'email': email}}}
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data)
return result, "removed" if result else "failed"
def update_email(self, client, application, new_email="", old_email=""):
"""
Updates the email address of a user.
:param client: Client object.
:param application: Application object.
:param new_email: New email id.
:param old_email: Old email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if old_email == "":
old_email = self.email
existing = application.users
if any(user.get('email') == new_email for user in existing):
return None, "existing"
condition = {'api': application.api, 'users.email': old_email}
data = {'$set': {'users.$[user].email': new_email}}
array_filters = [{'user.email': old_email}]
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data,
array_filters=array_filters)
return result, "update" if result else "failed"
def update_name(self, client, application, name, email=""):
"""
Update name of user.
:param client: Client object.
:param application: Application object.
:param name: Name.
:param email: Email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
condition = {'api': application.api, 'users.email': email}
data = {'$set': {'users.$[user].name': name}}
array_filters = [{'user.email': email}]
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data,
array_filters=array_filters)
return result, "update" if result else "failed"
def update_role(self, client, application, role, email=""):
"""
Updates the role of a user.
:param client: Client object.
:param application: Application object.
:param role: New role.
:param email: Email id.
:return: Update object.
"""
db_obj = db_init()
if client.email != Clients().get_by_id(oid=application.owner)['email']:
return None, "not allowed"
if email == "":
email = self.email
existing = application.roles
found = False
for er in existing:
if role == er['id']:
found = True
if not found:
return None, "role not defined"
condition = {'api': application.api, 'users.email': email}
data = {'$set': {'users.$[user].role': role}}
array_filters = [{'user.email': email}]
result = Update().update_one_by_condition(db_obj=db_obj, collection=COL_NAME, condition=condition, data=data,
array_filters=array_filters)
return result, "update" if result else "failed"
| en | 0.729402 | This function checks the mongodb connections object. :return: Mongodb connections object. Init method for a permission. :param name: Name of the permission. :param value: Permission string. Should be unique. This function returns a permission document. :param application: Application. :param permission: Value of the permission. :return: Dictionary. This function adds a permission for a specific app. :param client: Client entity object for the application client. :param application: Application. :param name: Name of the permission. :param value: String value of the permission. Should be unique. :return: Added permission. Add multiple permissions. :param client: Client entity object. :param application: Application. :param permissions: Permissions array. :return: Update object. This function removes a permission for an application. :param client: Client entities object. :param application: Application. :param permission: Permission string to remove. :return: Delete Object. This function updates the name of the permission. :param client: Client entities object. :param application: Application. :param name: Name value to update. :param permission: Permission value. :return: Update result object. This function updates the value of the permission. :param client: Client entities object. :param application: Application. :param new_value: New value to set. :param old_value: Old value to look for. :return: Result object Init function for role object. :param name: Name of the role. :param id: Unique id of the role. :param permissions: List of permissions. This function is used to set attributes for the Role object. :param doc: Role document from db. Gets the roles for the application. :param client: Client object. :param application: Application object. :param role_id: Role id. :return: List. Adds a role for the application. :param client: Client object. :param application: Application object. :param name: Role name. :param role_id: Role id. :param permissions: Permissions list. :return: Update object. Adds multiple roles for the application. :param client: Client object. :param application: Application object. :param roles: Roles array. :return: Update object. Updates the name of the role. :param client: Client object. :param application: Application Object. :param name: New name. :param role_id: Role id. :return: Update object. This function sets the new set of permissions for a role. :param client: Client object. :param application: Application object. :param permissions: Permissions array. :param role_id: Role id. :return: Update object. Add permissions for a role. :param client: Client object. :param application: Application object. :param permissions: Permissions list. :param role_id: Role id. :return: Update object. Remove permissions for a role. :param client: Client object. :param application: Application object. :param permissions: Permissions to remove. :param role_id: Role id. :return: Update object. This function deletes a role for an application. :param client: Client object. :param application: Application object. :param role_id: Role id. :return: Update object. Init function for creating a member object. :param id_: Unique id of the user. :param email: Email id. :param name: Name :param role: Role id. Returns a string of the user object. :return: String. Returns a json object. :return: JSON. Set object attributes from document. :param doc: Document. Get an user by its id. :param application: Application object. :param client: Client object. 
:param id_: Id to look for. :return: Document Get an user by its email. :param client: Client object. :param application: Application object. :param email: Email id. :return: Document. Adding a single user for an application. :param client: Client object. :param application: Application object. :param email: Email id. :param role: Role. :param name: Name :return: Update object. Add multiple users to an app at once. :param client: Client object. :param application: Application object. :param users: Users array. :return: Removes an user from the application. :param client: Client object. :param application: Application object. :param email: Email id. :return: Update object. Updates the email address of an user. :param client: Client object. :param application: Application object. :param new_email: New email id. :param old_email: Old email id. :return: Update object. Update name of user. :param client: Client object. :param application: Application object. :param name: Name. :param email: Email id. :return: Update object. Updates the email address of an user. :param client: Client object. :param application: Application object. :param role: New role. :param email: Email id. :return: Update object. | 2.942219 | 3 |
mmdet/models/backbones/gate.py | vinnibuh/mmdetection | 0 | 6632981 | <reponame>vinnibuh/mmdetection
import torch
import torch.nn as nn
from .gumbel import GumbleSoftmax
class GateModule(nn.Module):
def __init__(self, in_ch, act='relu', kernel_size=None, doubleGate=False, dwLA=False):
super(GateModule, self).__init__()
self.doubleGate, self.dwLA = doubleGate, dwLA
self.inp_gs = GumbleSoftmax()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.in_ch = in_ch
if act == 'relu':
relu = nn.ReLU
elif act == 'relu6':
relu = nn.ReLU6
else: raise NotImplementedError
if dwLA:
if doubleGate:
self.inp_att = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=kernel_size, stride=1, padding=0, groups=in_ch,
bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.Sigmoid()
)
self.inp_gate = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=kernel_size, stride=1, padding=0, groups=in_ch, bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(in_ch),
)
self.inp_gate_l = nn.Conv2d(in_ch, in_ch * 2, kernel_size=1, stride=1, padding=0, groups=in_ch,
bias=True)
else:
if doubleGate:
reduction = 4
self.inp_att = nn.Sequential(
nn.Conv2d(in_ch, in_ch // reduction, kernel_size=1, stride=1, padding=0, bias=True),
relu(inplace=True),
nn.Conv2d(in_ch // reduction, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.Sigmoid()
)
self.inp_gate = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
)
self.inp_gate_l = nn.Conv2d(in_ch, in_ch * 2, kernel_size=1, stride=1, padding=0, groups=in_ch, bias=True)
def forward(self, y, cb, cr, temperature=1.):
hatten_y, hatten_cb, hatten_cr = self.avg_pool(y), self.avg_pool(cb), self.avg_pool(cr)
hatten_d2 = torch.cat((hatten_y, hatten_cb, hatten_cr), dim=1)
hatten_d2 = self.inp_gate(hatten_d2)
hatten_d2 = self.inp_gate_l(hatten_d2)
hatten_d2 = hatten_d2.reshape(hatten_d2.size(0), self.in_ch, 2, 1)
hatten_d2 = self.inp_gs(hatten_d2, temp=temperature, force_hard=True)
y = y * hatten_d2[:, :64, 1].unsqueeze(2)
cb = cb * hatten_d2[:, 64:128, 1].unsqueeze(2)
cr = cr * hatten_d2[:, 128:, 1].unsqueeze(2)
return y, cb, cr, hatten_d2[:, :, 1]
class GateModule192(nn.Module):
def __init__(self, act='relu'):
super(GateModule192, self).__init__()
self.inp_gs = GumbleSoftmax()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.in_ch = in_ch = 192
if act == 'relu':
relu = nn.ReLU
elif act == 'relu6':
relu = nn.ReLU6
else: raise NotImplementedError
self.inp_gate = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
)
self.inp_gate_l = nn.Conv2d(in_ch, in_ch * 2, kernel_size=1, stride=1, padding=0, groups=in_ch, bias=True)
def forward(self, x, temperature=1.):
hatten = self.avg_pool(x)
hatten_d = self.inp_gate(hatten)
hatten_d = self.inp_gate_l(hatten_d)
hatten_d = hatten_d.reshape(hatten_d.size(0), self.in_ch, 2, 1)
hatten_d = self.inp_gs(hatten_d, temp=temperature, force_hard=True)
x = x * hatten_d[:, :, 1].unsqueeze(2)
return x, hatten_d[:, :, 1]
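# --- Usage sketch (illustrative only) ----------------------------------------
# Minimal smoke test for GateModule192 on random data; the batch and spatial
# sizes are arbitrary. Runs on CPU because the module itself holds no .cuda()
# calls.
def _example_gate192_forward():
    gate = GateModule192(act='relu')
    x = torch.randn(2, 192, 28, 28)
    out, gates = gate(x, temperature=1.0)
    # `out` keeps the input shape; `gates` holds one hard 0/1 gate per channel.
    return out.shape, gates.shape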
| import torch
import torch.nn as nn
from .gumbel import GumbleSoftmax
class GateModule(nn.Module):
def __init__(self, in_ch, act='relu', kernel_size=None, doubleGate=False, dwLA=False):
super(GateModule, self).__init__()
self.doubleGate, self.dwLA = doubleGate, dwLA
self.inp_gs = GumbleSoftmax()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.in_ch = in_ch
if act == 'relu':
relu = nn.ReLU
elif act == 'relu6':
relu = nn.ReLU6
else: raise NotImplementedError
if dwLA:
if doubleGate:
self.inp_att = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=kernel_size, stride=1, padding=0, groups=in_ch,
bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.Sigmoid()
)
self.inp_gate = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=kernel_size, stride=1, padding=0, groups=in_ch, bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(in_ch),
)
self.inp_gate_l = nn.Conv2d(in_ch, in_ch * 2, kernel_size=1, stride=1, padding=0, groups=in_ch,
bias=True)
else:
if doubleGate:
reduction = 4
self.inp_att = nn.Sequential(
nn.Conv2d(in_ch, in_ch // reduction, kernel_size=1, stride=1, padding=0, bias=True),
relu(inplace=True),
nn.Conv2d(in_ch // reduction, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.Sigmoid()
)
self.inp_gate = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
)
self.inp_gate_l = nn.Conv2d(in_ch, in_ch * 2, kernel_size=1, stride=1, padding=0, groups=in_ch, bias=True)
def forward(self, y, cb, cr, temperature=1.):
hatten_y, hatten_cb, hatten_cr = self.avg_pool(y), self.avg_pool(cb), self.avg_pool(cr)
hatten_d2 = torch.cat((hatten_y, hatten_cb, hatten_cr), dim=1)
hatten_d2 = self.inp_gate(hatten_d2)
hatten_d2 = self.inp_gate_l(hatten_d2)
hatten_d2 = hatten_d2.reshape(hatten_d2.size(0), self.in_ch, 2, 1)
hatten_d2 = self.inp_gs(hatten_d2, temp=temperature, force_hard=True)
y = y * hatten_d2[:, :64, 1].unsqueeze(2)
cb = cb * hatten_d2[:, 64:128, 1].unsqueeze(2)
cr = cr * hatten_d2[:, 128:, 1].unsqueeze(2)
return y, cb, cr, hatten_d2[:, :, 1]
class GateModule192(nn.Module):
def __init__(self, act='relu'):
super(GateModule192, self).__init__()
self.inp_gs = GumbleSoftmax()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.in_ch = in_ch = 192
if act == 'relu':
relu = nn.ReLU
elif act == 'relu6':
relu = nn.ReLU6
else: raise NotImplementedError
self.inp_gate = nn.Sequential(
nn.Conv2d(in_ch, in_ch, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(in_ch),
relu(inplace=True),
)
self.inp_gate_l = nn.Conv2d(in_ch, in_ch * 2, kernel_size=1, stride=1, padding=0, groups=in_ch, bias=True)
def forward(self, x, temperature=1.):
hatten = self.avg_pool(x)
hatten_d = self.inp_gate(hatten)
hatten_d = self.inp_gate_l(hatten_d)
hatten_d = hatten_d.reshape(hatten_d.size(0), self.in_ch, 2, 1)
hatten_d = self.inp_gs(hatten_d, temp=temperature, force_hard=True)
x = x * hatten_d[:, :, 1].unsqueeze(2)
return x, hatten_d[:, :, 1] | none | 1 | 2.552623 | 3 |
|
gssClients/gssPythonClients/delete_gss.py | SemWES/client_libs | 0 | 6632982 | <reponame>SemWES/client_libs
#!/bin/env python
# Copyright STIFTELSEN SINTEF 2016
import suds
import urllib2
import sys
if len(sys.argv) < 3:
print ("Usage:")
print ("\t %s gss-url token" % sys.argv[0])
exit()
# get url:
url = sys.argv[1]
sessionToken = sys.argv[2]
wsdl_url = "https://api.caxman.eu/sintef/infrastructure/gss-0.1/FileUtilities?wsdl"
client = suds.client.Client(wsdl_url)
resourceInformation = client.service.getResourceInformation(url, sessionToken)
deleteDescription = resourceInformation.deleteDescription
if deleteDescription.supported:
headers = {}
headers[deleteDescription.sessionTokenField] = sessionToken
if hasattr(deleteDescription, "headers"):
for headerField in deleteDescription.headers:
headers[headerField.key] = headerField.value
request = urllib2.Request(url = deleteDescription.url, headers=headers)
request.get_method = lambda: deleteDescription.httpMethod
result = urllib2.urlopen(request)
else:
print "Delete is not supported for the given gss_url"
| #!/bin/env python
# Copyright STIFTELSEN SINTEF 2016
import suds
import urllib2
import sys
if len(sys.argv) < 3:
print ("Usage:")
print ("\t %s gss-url token" % sys.argv[0])
exit()
# get url:
url = sys.argv[1]
sessionToken = sys.argv[2]
wsdl_url = "https://api.caxman.eu/sintef/infrastructure/gss-0.1/FileUtilities?wsdl"
client = suds.client.Client(wsdl_url)
resourceInformation = client.service.getResourceInformation(url, sessionToken)
deleteDescription = resourceInformation.deleteDescription
if deleteDescription.supported:
headers = {}
headers[deleteDescription.sessionTokenField] = sessionToken
if hasattr(deleteDescription, "headers"):
for headerField in deleteDescription.headers:
headers[headerField.key] = headerField.value
request = urllib2.Request(url = deleteDescription.url, headers=headers)
request.get_method = lambda: deleteDescription.httpMethod
result = urllib2.urlopen(request)
else:
print "Delete is not supported for the given gss_url" | en | 0.426234 | #!/bin/env python # Copyright STIFTELSEN SINTEF 2016 # get url: | 2.507796 | 3 |
html_tag_count.py | daoudclarke/tinysearch-spark | 207 | 6632983 | <reponame>daoudclarke/tinysearch-spark
import re
from collections import Counter
from sparkcc import CCSparkJob
class TagCountJob(CCSparkJob):
""" Count HTML tag names in Common Crawl WARC files"""
name = "TagCount"
# match HTML tags (element names) on binary HTML data
html_tag_pattern = re.compile(b'<([a-z0-9]+)')
def process_record(self, record):
if record.rec_type != 'response':
# skip over WARC request or metadata records
return
if not self.is_html(record):
# skip non-HTML or unknown content types
return
data = record.content_stream().read()
counts = Counter(TagCountJob.html_tag_pattern.findall(data))
for tag, count in counts.items():
yield tag.decode('ascii').lower(), count
if __name__ == '__main__':
job = TagCountJob()
job.run()
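# --- Usage sketch (illustrative only) ----------------------------------------
# Shows what the tag-name regex above extracts from a small HTML snippet;
# closing tags are skipped because "</" does not match "<([a-z0-9]+)".
def _example_tag_count():
    html = b"<html><body><p>hi</p><p>there</p></body></html>"
    counts = Counter(TagCountJob.html_tag_pattern.findall(html))
    # counts == Counter({b'p': 2, b'html': 1, b'body': 1})
    return counts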
| import re
from collections import Counter
from sparkcc import CCSparkJob
class TagCountJob(CCSparkJob):
""" Count HTML tag names in Common Crawl WARC files"""
name = "TagCount"
# match HTML tags (element names) on binary HTML data
html_tag_pattern = re.compile(b'<([a-z0-9]+)')
def process_record(self, record):
if record.rec_type != 'response':
# skip over WARC request or metadata records
return
if not self.is_html(record):
# skip non-HTML or unknown content types
return
data = record.content_stream().read()
counts = Counter(TagCountJob.html_tag_pattern.findall(data))
for tag, count in counts.items():
yield tag.decode('ascii').lower(), count
if __name__ == '__main__':
job = TagCountJob()
job.run() | en | 0.423981 | Count HTML tag names in Common Crawl WARC files # match HTML tags (element names) on binary HTML data # skip over WARC request or metadata records # skip non-HTML or unknown content types | 2.868656 | 3 |
lib/python2.7/site-packages/scipy/misc/tests/test_common.py | wfehrnstrom/harmonize | 18 | 6632984 | from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal, assert_
from scipy.misc import pade, logsumexp, face, ascent
from scipy.special import logsumexp as sc_logsumexp
from scipy.interpolate import pade as i_pade
def test_logsumexp():
# make sure logsumexp can be imported from either scipy.misc or
# scipy.special
assert_(logsumexp is sc_logsumexp)
def test_pade():
assert_(pade is i_pade)
def test_face():
assert_equal(face().shape, (768, 1024, 3))
def test_ascent():
assert_equal(ascent().shape, (512, 512))
| from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal, assert_
from scipy.misc import pade, logsumexp, face, ascent
from scipy.special import logsumexp as sc_logsumexp
from scipy.interpolate import pade as i_pade
def test_logsumexp():
# make sure logsumexp can be imported from either scipy.misc or
# scipy.special
assert_(logsumexp is sc_logsumexp)
def test_pade():
assert_(pade is i_pade)
def test_face():
assert_equal(face().shape, (768, 1024, 3))
def test_ascent():
assert_equal(ascent().shape, (512, 512))
| en | 0.641693 | # make sure logsumexp can be imported from either scipy.misc or # scipy.special | 2.258261 | 2 |
demo/gesture_inference.py | jiangtaoo2333/StaticGestureRecognition | 0 | 6632985 | <gh_stars>0
import argparse
import os
import os.path as osp
import sys
import time
import mmcv
import numpy as np
import torch
from mmcv import Config
import torch.nn as nn
import cv2
dirpath = osp.dirname(osp.dirname(osp.abspath(__file__))).replace('\\','/')
sys.path.append(dirpath)
import timm
def get_args():
parser = argparse.ArgumentParser("MultiTaskOnFace build by Jiangtao")
parser.add_argument('--config',
default='{}/configs/gesture/dms_easyNet_crossentroy_cosineannealing_augmix.py'.format(dirpath),help='train config file path')
args = parser.parse_args()
return args
args = get_args()
cfg = Config.fromfile(args.config)
class StaticGesture():
def __init__(self,
cfg=cfg,
checkpoint='easyNet_DMS_gender_best_0.967529296875.pkl'):
self.cfg = cfg
self.model = timm.create_model(self.cfg.modelName, pretrained=False, num_classes=self.cfg.numClasses,
in_chans=self.cfg.channels).cuda()
filename = self.cfg.filename
basefilename = osp.basename(filename)
basefilename = osp.splitext(basefilename)[0]
self.modelPath = osp.join('{}/work_dirs/'.format(dirpath),basefilename)
self.modelPath = osp.join(self.modelPath,checkpoint)
print('self.modelPath:',self.modelPath)
self.model.load_state_dict(torch.load(self.modelPath),strict=False)
self.model.cuda().eval()
def classify(self,image,box):
'''
image is numpy h w
box is [x,y,x,y]
'''
scale = 0.10
xmin,ymin,xmax,ymax = box
roiw = xmax - xmin
roih = ymax - ymin
xmin -= roiw * scale
xmax += roiw * scale
ymin -= roih * scale
ymax += roih * scale
xmin = np.clip(xmin,0,image.shape[1]-1)
xmax = np.clip(xmax,0,image.shape[1]-1)
ymin = np.clip(ymin,0,image.shape[0]-1)
ymax = np.clip(ymax,0,image.shape[0]-1)
x1 = int(xmin)
x2 = int(xmax)
y1 = int(ymin)
y2 = int(ymax)
img = image[y1:y2,x1:x2]
# preprocess the input image (resize the crop and scale pixels to [0, 1])
img = cv2.resize(img, (self.cfg.imgSize,self.cfg.imgSize), interpolation = cv2.INTER_CUBIC)*0.0039216
img = img[np.newaxis] # 1 128 128
img_ = torch.from_numpy(img) # 1 128 128
img_ = img_.unsqueeze_(0) # 1 1 128 128
img_ = img_.cuda()
pre_ = self.model(img_.float())
m = nn.Softmax(dim=1)
pre_ = m(pre_)
pre_ = pre_.cpu().detach().numpy().reshape((1,-1))
res = np.argmax(pre_,axis=-1)
if res[0] == 0:
label = 'palm'
if res[0] == 1:
label = 'singleFinger'
if res[0] == 2:
label = 'doubleFinger'
score = pre_[0][res[0]]
return label,score
if __name__ == '__main__':
StaticGestureCls = StaticGesture()
img = cv2.imread('./demo/images/1.jpg',0)
box = [1057,504,1207,706]
x1,y1,x2,y2 = box
label,score = StaticGestureCls.classify(img,box)
print(label)
print(score)
cv2.rectangle(img, (int(x1),int(y1)), (int(x2),int(y2)), (0,255,0), 2)
cv2.imshow('img',img)
key = cv2.waitKey(0)
if key == ord('q'):
cv2.destroyAllWindows()
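# --- Usage sketch (illustrative only) ----------------------------------------
# Helper showing how the classifier above could be applied to several detected
# hand boxes in one grayscale frame; box coordinates are made-up samples.
def classify_all_boxes(classifier, gray_image, boxes):
    # Each entry becomes a (label, score) tuple, e.g. ('palm', 0.98).
    return [classifier.classify(gray_image, box) for box in boxes]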
| import argparse
import os
import os.path as osp
import sys
import time
import mmcv
import numpy as np
import torch
from mmcv import Config
import torch.nn as nn
import cv2
dirpath = osp.dirname(osp.dirname(osp.abspath(__file__))).replace('\\','/')
sys.path.append(dirpath)
import timm
def get_args():
parser = argparse.ArgumentParser("MultiTaskOnFace build by Jiangtao")
parser.add_argument('--config',
default='{}/configs/gesture/dms_easyNet_crossentroy_cosineannealing_augmix.py'.format(dirpath),help='train config file path')
args = parser.parse_args()
return args
args = get_args()
cfg = Config.fromfile(args.config)
class StaticGesture():
def __init__(self,
cfg=cfg,
checkpoint='easyNet_DMS_gender_best_0.967529296875.pkl'):
self.cfg = cfg
self.model = timm.create_model(self.cfg.modelName, pretrained=False, num_classes=self.cfg.numClasses,
in_chans=self.cfg.channels).cuda()
filename = self.cfg.filename
basefilename = osp.basename(filename)
basefilename = osp.splitext(basefilename)[0]
self.modelPath = osp.join('{}/work_dirs/'.format(dirpath),basefilename)
self.modelPath = osp.join(self.modelPath,checkpoint)
print('self.modelPath:',self.modelPath)
self.model.load_state_dict(torch.load(self.modelPath),strict=False)
self.model.cuda().eval()
def classify(self,image,box):
'''
image is numpy h w
box is [x,y,x,y]
'''
scale = 0.10
xmin,ymin,xmax,ymax = box
roiw = xmax - xmin
roih = ymax - ymin
xmin -= roiw * scale
xmax += roiw * scale
ymin -= roih * scale
ymax += roih * scale
xmin = np.clip(xmin,0,image.shape[1]-1)
xmax = np.clip(xmax,0,image.shape[1]-1)
ymin = np.clip(ymin,0,image.shape[0]-1)
ymax = np.clip(ymax,0,image.shape[0]-1)
x1 = int(xmin)
x2 = int(xmax)
y1 = int(ymin)
y2 = int(ymax)
img = image[y1:y2,x1:x2]
# preprocess the input image (resize the crop and scale pixels to [0, 1])
img = cv2.resize(img, (self.cfg.imgSize,self.cfg.imgSize), interpolation = cv2.INTER_CUBIC)*0.0039216
img = img[np.newaxis] # 1 128 128
img_ = torch.from_numpy(img) # 1 128 128
img_ = img_.unsqueeze_(0) # 1 1 128 128
img_ = img_.cuda()
pre_ = self.model(img_.float())
m = nn.Softmax(dim=1)
pre_ = m(pre_)
pre_ = pre_.cpu().detach().numpy().reshape((1,-1))
res = np.argmax(pre_,axis=-1)
if res[0] == 0:
label = 'palm'
if res[0] == 1:
label = 'singleFinger'
if res[0] == 2:
label = 'doubleFinger'
score = pre_[0][res[0]]
return label,score
if __name__ == '__main__':
StaticGestureCls = StaticGesture()
img = cv2.imread('./demo/images/1.jpg',0)
box = [1057,504,1207,706]
x1,y1,x2,y2 = box
label,score = StaticGestureCls.classify(img,box)
print(label)
print(score)
cv2.rectangle(img, (int(x1),int(y1)), (int(x2),int(y2)), (0,255,0), 2)
cv2.imshow('img',img)
key = cv2.waitKey(0)
if key == ord('q'):
cv2.destroyAllWindows() | en | 0.292993 | image is numpy h w box is [x,y,x,y] # 输入图片预处理 # 1 128 128 # 1 128 128 # 1 1 128 128 | 2.095242 | 2 |
Exercicios/ex059.py | jlsmirandela/Curso_Python | 0 | 6632986 | pv = int(input('Insira o primeiro valor - '))
sv = int(input('Insira o segundo valor - '))
op = 0
while op != 5:
print('''[ 1 ] Somar
[ 2 ] Multiplicar
[ 3 ] Maior
[ 4 ] Novos números
[ 5 ] Sair''')
op = int(input('>>>>>>>> Qual a sua opção? - '))
while op not in range(0, 6):
op = int(input('Opção inválida, escolha uma opção? - '))
if op == 1:
print('A soma entre {} e {} é {}.\n'.format(pv, sv, pv + sv))
elif op == 2:
print('A multiplicação entre {} e {} é {}\n'.format(pv, sv, pv * sv))
elif op == 3:
if pv > sv:
print('O primeiro valor ({}) é MAIOR que o segundo valor ({})\n'.format(pv, sv))
elif pv < sv:
print('O segundo valor ({}) é MAIOR que o primeiro valor ({}\n)'.format(sv, pv))
else:
print('Os valores são IGUAIS.\n')
elif op == 4:
pv = int(input('Insira o primeiro valor - '))
sv = int(input('Insira o segundo valor - '))
print('Fim do programa')
| pv = int(input('Insira o primeiro valor - '))
sv = int(input('Insira o segundo valor - '))
op = 0
while op != 5:
print('''[ 1 ] Somar
[ 2 ] Multiplicar
[ 3 ] Maior
[ 4 ] Novos números
[ 5 ] Sair''')
op = int(input('>>>>>>>> Qual a sua opção? - '))
while op not in range(0, 6):
op = int(input('Opção inválida, escolha uma opção? - '))
if op == 1:
print('A soma entre {} e {} é {}.\n'.format(pv, sv, pv + sv))
elif op == 2:
print('A multiplicação entre {} e {} é {}\n'.format(pv, sv, pv * sv))
elif op == 3:
if pv > sv:
print('O primeiro valor ({}) é MAIOR que o segundo valor ({})\n'.format(pv, sv))
elif pv < sv:
print('O segundo valor ({}) é MAIOR que o primeiro valor ({}\n)'.format(sv, pv))
else:
print('Os valores são IGUAIS.\n')
elif op == 4:
pv = int(input('Insira o primeiro valor - '))
sv = int(input('Insira o segundo valor - '))
print('Fim do programa')
| pt | 0.27341 | [ 1 ] Somar [ 2 ] Multiplicar [ 3 ] Maior [ 4 ] Novos números [ 5 ] Sair | 3.909227 | 4 |
torchsso/__init__.py | fmahdisoltani/multimodal_madam | 0 | 6632987 | from torchsso import optim # NOQA
from torchsso import autograd # NOQA
from torchsso import utils # NOQA
from torchsso.curv.curvature import Curvature, DiagCurvature, KronCurvature # NOQA
from torchsso.curv.cov.linear import CovLinear, DiagCovLinear, KronCovLinear, DiagGMMLinear # NOQA
from torchsso.curv.cov.conv import CovConv2d, DiagCovConv2d, KronCovConv2d # NOQA
from torchsso.curv.cov.batchnorm import CovBatchNorm1d, DiagCovBatchNorm1d, CovBatchNorm2d, DiagCovBatchNorm2d # NOQA
from torchsso.curv.hessian import KronHessian # NOQA
from torchsso.curv.hessian.linear import KronHessianLinear # NOQA
from torchsso.curv.hessian.conv import KronHessianConv2d # NOQA
from torchsso.curv.fisher import get_closure_for_fisher # NOQA
from torchsso.curv.fisher import Fisher # NOQA
from torchsso.curv.fisher.linear import DiagFisherLinear, KronFisherLinear # NOQA
from torchsso.curv.fisher.conv import DiagFisherConv2d, KronFisherConv2d # NOQA
from torchsso.curv.fisher.batchnorm import DiagFisherBatchNorm2d # NOQA
| from torchsso import optim # NOQA
from torchsso import autograd # NOQA
from torchsso import utils # NOQA
from torchsso.curv.curvature import Curvature, DiagCurvature, KronCurvature # NOQA
from torchsso.curv.cov.linear import CovLinear, DiagCovLinear, KronCovLinear, DiagGMMLinear # NOQA
from torchsso.curv.cov.conv import CovConv2d, DiagCovConv2d, KronCovConv2d # NOQA
from torchsso.curv.cov.batchnorm import CovBatchNorm1d, DiagCovBatchNorm1d, CovBatchNorm2d, DiagCovBatchNorm2d # NOQA
from torchsso.curv.hessian import KronHessian # NOQA
from torchsso.curv.hessian.linear import KronHessianLinear # NOQA
from torchsso.curv.hessian.conv import KronHessianConv2d # NOQA
from torchsso.curv.fisher import get_closure_for_fisher # NOQA
from torchsso.curv.fisher import Fisher # NOQA
from torchsso.curv.fisher.linear import DiagFisherLinear, KronFisherLinear # NOQA
from torchsso.curv.fisher.conv import DiagFisherConv2d, KronFisherConv2d # NOQA
from torchsso.curv.fisher.batchnorm import DiagFisherBatchNorm2d # NOQA
| ur | 0.237172 | # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA | 1.515229 | 2 |
mintamazontagger/category.py | glassdimly/mint-amazon-tagger | 0 | 6632988 |
# The default Mint category.
DEFAULT_MINT_CATEGORY = 'Shopping'
# The default return category.
DEFAULT_MINT_RETURN_CATEGORY = 'Returned Purchase'
# A category mapping of Amazon Order History "Categories" into Mint
# "Categories".
AMAZON_TO_MINT_CATEGORY = {
'Accessory': DEFAULT_MINT_CATEGORY,
'Apparel': 'Clothing',
'Audio CD': 'Music',
'Automotive': 'Auto & Transport',
'Baby Product': 'Baby Supplies',
'Blu-ray': 'Movies & DVDs',
'CD-ROM': 'Music',
'Camera': 'Electronics & Software',
'Electronics': 'Electronics & Software',
'Eyewear': 'Eyecare',
'Grocery': 'Groceries',
'Hardcover': 'Books',
'Health and Beauty': 'Personal Care',
'Home': 'Home',
'Kitchen': DEFAULT_MINT_CATEGORY,
'Lawn & Patio': 'Lawn & Garden',
'Luggage': DEFAULT_MINT_CATEGORY,
'Mass Market Paperback': 'Books',
'Misc.': DEFAULT_MINT_CATEGORY,
'Office Product': 'Office Supplies',
'Paperback': 'Books',
'Personal Computers': 'Electronics & Software',
'Print on Demand': DEFAULT_MINT_CATEGORY,
'Shoes': 'Clothing',
'Software Download': 'Electronics & Software',
'Sports': 'Sporting Goods',
'Sports Apparel': 'Sporting Goods',
'T-shirt': 'Clothing',
'Tools & Hardware': 'Home',
'Tools & Home Improvement': 'Home Improvement',
'Toy': 'Toys',
'Video Game': 'Electronics & Software',
'Watch': DEFAULT_MINT_CATEGORY,
'Wine': 'Alcohol & Bars',
'Wireless Phone Accessory': 'Electronics & Software',
}
# Pulled early 2018.
DEFAULT_MINT_CATEGORIES_TO_IDS = {
'ATM Fee': 1605,
'Advertising': 1701,
'Air Travel': 1501,
'Alcohol & Bars': 708,
'Allowance': 610,
'Amusement': 102,
'Arts': 101,
'Auto & Transport': 14,
'Auto Insurance': 1405,
'Auto Payment': 1404,
'Baby Supplies': 611,
'Babysitter & Daycare': 602,
'Bank Fee': 1606,
'Bills & Utilities': 13,
'Bonus': 3004,
'Books': 202,
'Books & Supplies': 1003,
'Business Services': 17,
'Buy': 5004,
'Cash & ATM': 2001,
'Charity': 802,
'Check': 2002,
'Child Support': 603,
'Clothing': 201,
'Coffee Shops': 704,
'Credit Card Payment': 2101,
'Dentist': 501,
'Deposit': 5001,
'Dividend & Cap Gains': 5003,
'Doctor': 502,
'Education': 10,
'Electronics & Software': 204,
'Entertainment': 1,
'Eyecare': 503,
'Fast Food': 706,
'Federal Tax': 1901,
'Fees & Charges': 16,
'Finance Charge': 1604,
'Financial': 11,
'Financial Advisor': 1105,
'Food & Dining': 7,
'Furnishings': 1201,
'Gas & Fuel': 1401,
'Gift': 801,
'Gifts & Donations': 8,
'Groceries': 701,
'Gym': 507,
'Hair': 403,
'Health & Fitness': 5,
'Health Insurance': 506,
'Hide from Budgets & Trends': 40,
'Hobbies': 206,
'Home': 12,
'Home Improvement': 1203,
'Home Insurance': 1206,
'Home Phone': 1302,
'Home Services': 1204,
'Home Supplies': 1208,
'Hotel': 1502,
'Income': 30,
'Interest Income': 3005,
'Internet': 1303,
'Investments': 50,
'Kids': 6,
'Kids Activities': 609,
'Kitchen': 1562103,
'Late Fee': 1602,
'Laundry': 406,
'Lawn & Garden': 1202,
'Legal': 1705,
'Life Insurance': 1102,
'Loan Fees and Charges': 6005,
'Loan Insurance': 6002,
'Loan Interest': 6004,
'Loan Payment': 6001,
'Loan Principal': 6003,
'Loans': 60,
'Local Tax': 1903,
'Misc Expenses': 70,
'Mobile Phone': 1304,
'Mortgage & Rent': 1207,
'Movies & DVDs': 104,
'Music': 103,
'Newspapers & Magazines': 105,
'Office Supplies': 1702,
'Orthodontics': 1671958,
'Parking': 1402,
'Paycheck': 3001,
'Personal Care': 4,
'Pet Food & Supplies': 901,
'Pet Grooming': 902,
'Pets': 9,
'Pharmacy': 505,
'Printing': 1703,
'Property Tax': 1905,
'Public Transportation': 1406,
'Rail': 1562093,
'Reimbursement': 3006,
'Rental Car & Taxi': 1503,
'Rental Income': 3007,
'Restaurants': 707,
'Returned Purchase': 3003,
'Sales Tax': 1904,
'Sell': 5005,
'Service & Parts': 1403,
'Service Fee': 1601,
'Shipping': 1704,
'Shopping': 2,
'Spa & Massage': 404,
'Sporting Goods': 207,
'Sports': 508,
'State Tax': 1902,
'Student Loan': 1002,
'Taxes': 19,
'Television': 1301,
'Toys': 606,
'Trade Commissions': 1607,
'Transfer': 21,
'Transfer for Cash Spending': 2102,
'Travel': 15,
'Tuition': 1001,
'Uncategorized': 20,
'Utilities': 1306,
'Vacation': 1504,
'Veterinary': 903,
'Withdrawal': 5002,
}
| en | 0.491832 | # The default Mint category. # The default return category. # A category mapping of Amazon Order History "Categories" into Mint # "Categories". # Pulled early 2018. | 2.003522 | 2 |
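# --- Editor's illustrative sketch (not part of mintamazontagger/category.py) ---
# The module above maps Amazon order-history categories onto Mint categories,
# with DEFAULT_MINT_CATEGORY as the fallback. A minimal lookup helper built on
# those dicts might look like this; the helper name and the sample inputs are
# assumptions for illustration only.
def to_mint_category(amazon_category,
                     mapping=AMAZON_TO_MINT_CATEGORY,
                     default=DEFAULT_MINT_CATEGORY):
    """Return the Mint category for an Amazon category, falling back to the default."""
    return mapping.get(amazon_category, default)

# 'Paperback' is mapped explicitly to 'Books'; an unknown label falls back to 'Shopping'.
assert to_mint_category('Paperback') == 'Books'
assert to_mint_category('Garden Gnome') == DEFAULT_MINT_CATEGORY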
ceph_deploy/tests/test_install.py | weisongf/ceph-deploy | 353 | 6632989 | <gh_stars>100-1000
from mock import Mock
from ceph_deploy import install
class TestSanitizeArgs(object):
def setup(self):
self.args = Mock()
# set the default behavior we set in cli.py
self.args.default_release = False
self.args.stable = None
def test_args_release_not_specified(self):
self.args.release = None
result = install.sanitize_args(self.args)
# XXX
# we should get `args.release` to be the latest release
# but we don't want to be updating this test every single
# time there is a new default value, and we can't programatically
# change that. Future improvement: make the default release a
# variable in `ceph_deploy/__init__.py`
assert result.default_release is True
def test_args_release_is_specified(self):
self.args.release = 'dumpling'
result = install.sanitize_args(self.args)
assert result.default_release is False
def test_args_release_stable_is_used(self):
self.args.stable = 'dumpling'
result = install.sanitize_args(self.args)
assert result.release == 'dumpling'
def test_args_stable_is_not_used(self):
self.args.release = 'dumpling'
result = install.sanitize_args(self.args)
assert result.stable is None
class TestDetectComponents(object):
def setup(self):
self.args = Mock()
# default values for install_* flags
self.args.install_all = False
self.args.install_mds = False
self.args.install_mgr = False
self.args.install_mon = False
self.args.install_osd = False
self.args.install_rgw = False
self.args.install_tests = False
self.args.install_common = False
self.args.repo = False
self.distro = Mock()
def test_install_with_repo_option_returns_no_packages(self):
self.args.repo = True
result = install.detect_components(self.args, self.distro)
assert result == []
def test_install_all_returns_all_packages_deb(self):
self.args.install_all = True
self.distro.is_rpm = False
self.distro.is_deb = True
self.distro.is_pkgtarxz = False
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'radosgw'
])
def test_install_all_with_other_options_returns_all_packages_deb(self):
self.distro.is_rpm = False
self.distro.is_deb = True
self.distro.is_pkgtarxz = False
self.args.install_all = True
self.args.install_mds = True
self.args.install_mgr = True
self.args.install_mon = True
self.args.install_osd = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'radosgw'
])
def test_install_all_returns_all_packages_rpm(self):
self.args.install_all = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw'
])
def test_install_all_with_other_options_returns_all_packages_rpm(self):
self.args.install_all = True
self.args.install_mds = True
self.args.install_mon = True
self.args.install_mgr = True
self.args.install_osd = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw'
])
def test_install_all_returns_all_packages_pkgtarxz(self):
self.args.install_all = True
self.distro.is_rpm = False
self.distro.is_deb = False
self.distro.is_pkgtarxz = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph',
])
def test_install_all_with_other_options_returns_all_packages_pkgtarxz(self):
self.distro.is_rpm = False
self.distro.is_deb = False
self.distro.is_pkgtarxz = True
self.args.install_all = True
self.args.install_mds = True
self.args.install_mgr = True
self.args.install_mon = True
self.args.install_osd = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph',
])
def test_install_only_one_component(self):
self.args.install_osd = True
result = install.detect_components(self.args, self.distro)
assert result == ['ceph-osd']
def test_install_a_couple_of_components(self):
self.args.install_osd = True
self.args.install_mds = True
self.args.install_mgr = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted(['ceph-osd', 'ceph-mds', 'ceph-mgr'])
def test_install_tests(self):
self.args.install_tests = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted(['ceph-test'])
def test_install_all_should_be_default_when_no_options_passed(self):
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw'
])
]) | en | 0.778931 | # set the default behavior we set in cli.py # XXX # we should get `args.release` to be the latest release # but we don't want to be updating this test every single # time there is a new default value, and we can't programatically # change that. Future improvement: make the default release a # variable in `ceph_deploy/__init__.py` # default values for install_* flags | 2.378711 | 2 |
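# --- Editor's illustrative sketch (not part of ceph_deploy/tests/test_install.py) ---
# The tests above show that install.detect_components() turns the install_* flags
# on the parsed args into the list of packages for the target distro. A condensed
# usage sketch mirroring test_install_only_one_component; the Mock setup below is
# an assumption based purely on what the tests exercise.
from mock import Mock
from ceph_deploy import install

args = Mock()
args.repo = False
args.install_all = False
args.install_mds = False
args.install_mgr = False
args.install_mon = False
args.install_osd = True      # only the OSD component is requested
args.install_rgw = False
args.install_tests = False
args.install_common = False

print(install.detect_components(args, Mock()))   # expected: ['ceph-osd']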
hawc_hal/healpix_handling/flat_sky_to_healpix.py | igherzog/hawc_hal | 7 | 6632990 | <reponame>igherzog/hawc_hal<filename>hawc_hal/healpix_handling/flat_sky_to_healpix.py<gh_stars>1-10
from builtins import object
import healpy as hp
import numpy as np
import six
from scipy.ndimage import map_coordinates
from astropy.coordinates import Galactic, ICRS
from astropy import units as u
from astropy.coordinates import UnitSphericalRepresentation
from astropy.wcs.utils import wcs_to_celestial_frame
from ..special_values import UNSEEN
from ..interpolation import FastBilinearInterpolation
ORDER = {}
ORDER['nearest-neighbor'] = 0
ORDER['bilinear'] = 1
ORDER['biquadratic'] = 2
ORDER['bicubic'] = 3
COORDSYS = {
'g': Galactic(),
'c': ICRS(),
'icrs': ICRS(),
}
def _parse_coord_system(system):
try:
return COORDSYS[system.lower()]
except KeyError: # pragma: no cover
raise ValueError("Coordinate system %s is not known" % system)
def _convert_world_coordinates(lon_in, lat_in, wcs_in, wcs_out):
frame_in, lon_in_unit, lat_in_unit = wcs_in
wcs_out = wcs_out.celestial
frame_out = wcs_to_celestial_frame(wcs_out)
lon_out_unit = u.Unit(wcs_out.wcs.cunit[0])
lat_out_unit = u.Unit(wcs_out.wcs.cunit[1])
data = UnitSphericalRepresentation(lon_in * lon_in_unit,
lat_in * lat_in_unit)
coords_in = frame_in.realize_frame(data)
coords_out = coords_in.transform_to(frame_out)
lon_out = coords_out.represent_as('unitspherical').lon.to(lon_out_unit).value
lat_out = coords_out.represent_as('unitspherical').lat.to(lat_out_unit).value
return lon_out, lat_out
class FlatSkyToHealpixTransform(object):
"""
A class to perform transformation from a flat sky projection to Healpix optimized to be used for the same
transformation over and over again.
The constructor will pre-compute all needed quantities for the transformation, and the __call__ method just applies
    the transformation. This avoids re-computing the same quantities over and over again.
"""
def __init__(self, wcs_in, coord_system_out, nside, pixels_id, input_shape, order='bilinear', nested=False):
# Look up lon, lat of pixels in output system and convert colatitude theta
# and longitude phi to longitude and latitude.
theta, phi = hp.pix2ang(nside, pixels_id, nested)
lon_out = np.degrees(phi)
lat_out = 90. - np.degrees(theta)
# Convert between celestial coordinates
coord_system_out = _parse_coord_system(coord_system_out)
with np.errstate(invalid='ignore'):
lon_in, lat_in = _convert_world_coordinates(lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in)
# Look up pixels in input system
yinds, xinds = wcs_in.wcs_world2pix(lon_in, lat_in, 0)
self._coords = [xinds, yinds]
# Interpolate
if isinstance(order, six.string_types):
order = ORDER[order]
self._order = order
self._interpolator = FastBilinearInterpolation(input_shape, self._coords)
def __call__(self, data, fill_value=UNSEEN):
# healpix_data = map_coordinates(data, self._coords,
# order=self._order,
# mode='constant', cval=fill_value)
healpix_data = self._interpolator(data)
return healpix_data
return healpix_data | en | 0.710189 | # pragma: no cover A class to perform transformation from a flat sky projection to Healpix optimized to be used for the same transformation over and over again. The constructor will pre-compute all needed quantities for the transformation, and the __call__ method just applies the transformation. This avoids to re-compute the same quantities over and over again. # Look up lon, lat of pixels in output system and convert colatitude theta # and longitude phi to longitude and latitude. # Convert between celestial coordinates # Look up pixels in input system # Interpolate # healpix_data = map_coordinates(data, self._coords, # order=self._order, # mode='constant', cval=fill_value) | 2.301408 | 2 |
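# --- Editor's illustrative sketch (not part of flat_sky_to_healpix.py) ---
# FlatSkyToHealpixTransform (defined above) is constructed once, pre-computing the
# flat-sky pixel coordinates of the requested HEALPix pixels, and is then applied
# to any number of images that share the same WCS. The WCS, nside and pixel list
# below are placeholder assumptions for illustration.
import numpy as np
from astropy.wcs import WCS

wcs_in = WCS(naxis=2)                        # hypothetical flat-sky (TAN) projection
wcs_in.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs_in.wcs.cunit = ["deg", "deg"]
wcs_in.wcs.cdelt = [-0.01, 0.01]
wcs_in.wcs.crval = [83.6, 22.0]
wcs_in.wcs.crpix = [50.0, 50.0]

nside = 512
pixels_id = np.arange(1000)                  # HEALPix pixels of interest
flat_image = np.random.rand(100, 100)        # image defined on the flat-sky grid

transform = FlatSkyToHealpixTransform(wcs_in, 'icrs', nside, pixels_id,
                                       flat_image.shape, order='bilinear')
healpix_values = transform(flat_image)       # reusable for every new image with this WCS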
app/dnt_main.py | fatihy101/detect-and-track | 0 | 6632991 | <filename>app/dnt_main.py
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import FPS
import numpy as np
import imutils
import dlib
import tensorflow.compat.v1 as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import cv2
import sys
from PyQt5.QtCore import QThread, pyqtSignal, Qt, pyqtSlot, QObject, QRunnable, QThreadPool
from PyQt5.QtGui import QImage, QPixmap
from datetime import datetime
from sign_in.db_connection import *
class Signals(QObject):
changePixmap = pyqtSignal(QImage)
changeTextBox = pyqtSignal(str)
changeButton = pyqtSignal(str)
changeTitleBox = pyqtSignal(str)
class Detection(QRunnable):
def __init__(self):
super(Detection, self).__init__()
self.signals = Signals()
self.stopped = False
self.video_source = None
self.total_elapsed_time = 0
self.totalLeft = 0
self.totalRight = 0
self.enter_position = 'right'
self.model_path = 'model_dir/ssdnet_86k/frozen_inference_graph.pb'
self.label_path = 'model_dir/ssdnet_86k/cow_label_map.pbtxt'
self.num_classes = 1
@pyqtSlot()
def run(self):
self.signals.changeTitleBox.emit(" Sol Toplam\n"
"Sağ Toplam\n"
" Durum")
self.vs = cv2.VideoCapture(self.video_source)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(self.label_path)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=self.num_classes,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
W = None
H = None
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}
totalFrames = 0
skip_frame = 10
fps = FPS().start()
# Operation
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while True:
ret, self.frame = self.vs.read()
if self.frame is None or self.stopped:
print("Video stream ended.")
break
self.frame = imutils.resize(self.frame, width=1000) # Less data we have, faster we are.
rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
self.frame = rgb
if W is None or H is None:
(H, W, ch) = self.frame.shape
self.status = "Bekliyor"
rects = []
if totalFrames % skip_frame == 0:
self.status = "Saptanıyor"
trackers = []
frame_expanded = np.expand_dims(self.frame, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: frame_expanded})
ymin = int((boxes[0][0][0] * H))
xmin = int((boxes[0][0][1] * W))
ymax = int((boxes[0][0][2] * H))
xmax = int((boxes[0][0][3] * W))
box_area = (xmax - xmin) * (ymax - ymin)
total_area = W * H
# For eliminating the false positives.
if box_area > total_area * 0.5:
ymin, xmin, xmax, ymax = (None, None, None, None)
if ymin is not None:
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(xmin, ymin, xmax, ymax)
tracker.start_track(rgb, rect)
trackers.append(tracker)
else:
for tracker in trackers:
self.status = "Takip Ediliyor"
tracker.update(rgb)
pos = tracker.get_position()
xmin = int(pos.left())
ymin = int(pos.top())
xmax = int(pos.right())
ymax = int(pos.bottom())
rects.append((xmin, ymin, xmax, ymax))
# cv2.line(self.frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2)
objects = ct.update(rects)
for (objectID, centroid) in objects.items():
trackable_obj = trackableObjects.get(objectID, None)
if trackable_obj is None:
trackable_obj = TrackableObject(objectID, centroid)
else:
x = [c[0] for c in trackable_obj.centroids]
direction = centroid[0] - np.mean(x)
trackable_obj.centroids.append(centroid)
if not trackable_obj.counted:
# if the direction is negative (indicating the object
# is moving up) AND the centroid is above the center
# line, count the object
if direction < 0 and centroid[0] < int(W * 0.25):
self.totalLeft += 1
trackable_obj.counted = True
elif direction > 0 and centroid[0] > int(W * 0.75):
self.totalRight += 1
trackable_obj.counted = True
trackableObjects[objectID] = trackable_obj
text = "ID {}".format(objectID)
cv2.putText(self.frame, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.circle(self.frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
self.signals.changeTextBox.emit(f"{self.totalLeft}\n{self.totalRight}\n{self.status}")
# End of the loop
bytesPerLine = ch * W
convertToQtFormat = QImage(rgb.data, W, H, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(800, 600, Qt.KeepAspectRatio)
self.signals.changePixmap.emit(p)
totalFrames += 1
fps.update()
#
self.signals.changeTitleBox.emit("Durum: ")
# Clear output
self.signals.changeTextBox.emit("Rapor kaydedildi.")
# Alter button to Start.
self.signals.changeButton.emit("start_button")
# Stop FPS count.
fps.stop()
# Get total elapsed time.
self.total_elapsed_time = fps.elapsed()
# Create report to database.
self.create_report(self.totalLeft, self.totalRight, fps.elapsed())
# Finally, set placeholder.
self.signals.changePixmap.emit(QImage('./Resources/placeholder2.png'))
    # Save the counting report to the database; elapsed time is stored as hours:minutes:seconds.
def create_report(self, total_left, total_right, elapsed_time):
db_report = Database()
t = datetime.now()
current_time = t.strftime("%d/%m/%y %H:%M:%S.%f")[:-4]
db_report.insert_report(current_time, self.convert_hour_format(elapsed_time), total_left, total_right,
self.get_id_local(), self.enter_position)
print("create_report: done!")
db_report.cursor.close()
db_report.connection.close()
def get_id_local(self):
platform_name = platform.system()
# For Windows
if platform_name == "Windows":
save_dir = os.getenv('APPDATA')
file_path = save_dir + '\\Provactus\\usr.md'
elif platform_name == "Linux":
file_path = '/var/Provactus/usr.md'
try:
with open(file_path, 'r') as file:
read_file = file.readlines()
uid = read_file[0]
return uid
        except FileNotFoundError:
self.signals.changeTextBox.emit("Raporu kaydederken bir hata oluştu.")
def convert_hour_format(self, second):
minute = int(second / 60)
left_second = second % 60
hour = int(minute / 60)
left_minute = minute % 60
out = f"{hour}:{left_minute}:{int(left_second)}"
return out
| en | 0.660435 | # Operation # Less data we have, faster we are. # For eliminating the false positives. # cv2.line(self.frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2) # if the direction is negative (indicating the object # is moving up) AND the centroid is above the center # line, count the object # End of the loop # # Clear output # Alter button to Start. # Stop FPS count. # Get total elapsed time. # Create report to database. # Finally, set placeholder. # Format the elapsed time like: 10h 20m 55s # For Windows | 2.043574 | 2 |
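# --- Editor's illustrative sketch (not part of app/dnt_main.py) ---
# The counting rule used in run() above: each tracked object is counted once,
# as a "left" crossing when its centroid moves left of 25% of the frame width,
# or as a "right" crossing when it moves right of 75% of the frame width,
# relative to the mean of its previous x positions. A self-contained sketch:
import numpy as np

def count_crossing(previous_xs, centroid_x, frame_width):
    """Return 'left', 'right' or None for one tracked centroid update."""
    direction = centroid_x - np.mean(previous_xs)
    if direction < 0 and centroid_x < int(frame_width * 0.25):
        return 'left'
    if direction > 0 and centroid_x > int(frame_width * 0.75):
        return 'right'
    return None

# An object drifting from around x=600 down to x=200 in a 1000 px wide frame:
print(count_crossing([650, 600, 550], 200, 1000))   # -> 'left'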
core/thirdparty/ovf/python/test/simple.py | ddkn/spirit | 92 | 6632992 | import os
import sys
ovf_py_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), ".."))
sys.path.insert(0, ovf_py_dir)
from ovf import ovf
import numpy as np
import unittest
##########
class TestState(unittest.TestCase):
def test_nonexistent(self):
print("----- ovf test nonexistent")
with ovf.ovf_file("nonexistent.ovf") as ovf_file:
print("found: ", ovf_file.found)
print("is_ovf: ", ovf_file.is_ovf)
print("version: ", ovf_file.version)
print("n_segments: ", ovf_file.n_segments)
self.assertTrue( ovf_file.found == False )
self.assertTrue( ovf_file.is_ovf == False )
self.assertTrue( ovf_file.version == 0 )
self.assertTrue( ovf_file.n_segments == 0 )
segment = ovf.ovf_segment()
success = ovf_file.read_segment_header(0, segment)
if success != ovf.OK:
print("read_segment_header failed: ", ovf_file.get_latest_message())
self.assertFalse( success == ovf.OK )
print("----- ovf test nonexistent done")
def test_write(self):
print("----- ovf test writing")
with ovf.ovf_file("testfile_py.ovf") as ovf_file:
data = np.zeros((2, 2, 1, 3), dtype='d')
data[0,1,0,:] = [3.0, 2.0, 1.0]
segment = ovf.ovf_segment(
title="python write test",
comment="more details in this comment...",
valuedim=3,
n_cells=[2,2,1])
success = ovf_file.write_segment(segment, data)
if success != ovf.OK:
print("write_segment failed: ", ovf_file.get_latest_message())
self.assertTrue( success == ovf.OK )
data[0,1,0,:] = [4.0, 5.0, 6.0]
segment.title = "python append test".encode('utf-8')
success = ovf_file.append_segment(segment, data)
if success != ovf.OK:
print("append_segment failed: ", ovf_file.get_latest_message())
self.assertTrue( success == ovf.OK )
print("----- ovf test writing done")
print("----- ovf test reading")
with ovf.ovf_file("testfile_py.ovf") as ovf_file:
print("found: ", ovf_file.found)
print("is_ovf: ", ovf_file.is_ovf)
print("version: ", ovf_file.version)
print("n_segments: ", ovf_file.n_segments)
self.assertTrue( ovf_file.found == True )
self.assertTrue( ovf_file.is_ovf == True )
self.assertTrue( ovf_file.version == 2 )
self.assertTrue( ovf_file.n_segments == 2 )
segment = ovf.ovf_segment()
success = ovf_file.read_segment_header(0, segment)
if success != ovf.OK:
print("read_segment_header failed: ", ovf_file.get_latest_message())
self.assertTrue( success == ovf.OK )
data_shape = (segment.n_cells[0], segment.n_cells[1], segment.n_cells[2], 3)
data = np.zeros(data_shape, dtype='f')
print("data shape: ", data_shape)
success = ovf_file.read_segment_data(0, segment, data)
if success != ovf.OK:
print("read_segment_data failed: ", ovf_file.get_latest_message())
print("first segment: ", data[0,1,0,:])
self.assertTrue( success == ovf.OK )
success = ovf_file.read_segment_header(1, segment)
if success != ovf.OK:
print("read_segment_header failed: ", ovf_file.get_latest_message())
self.assertTrue( success == ovf.OK )
data_shape = (segment.n_cells[0], segment.n_cells[1], segment.n_cells[2], 3)
data = np.zeros(data_shape, dtype='d')
success = ovf_file.read_segment_data(1, segment, data)
if success != ovf.OK:
print("read_segment_data failed: ", ovf_file.get_latest_message())
print("second segment: ", data[0,1,0,:])
self.assertTrue( success == ovf.OK )
print("----- ovf test reading done")
#########
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestState)
success = unittest.TextTestRunner().run(suite).wasSuccessful()
    sys.exit(not success)
sys.exit(not success) | de | 0.934246 | ########## ######### | 2.630263 | 3 |
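# --- Editor's illustrative sketch (not part of core/thirdparty/ovf/python/test/simple.py) ---
# The tests above exercise the basic OVF round trip: write a segment, then read the
# header and data back. A condensed sketch using the same API calls; the file name
# and field values are arbitrary examples.
import numpy as np
from ovf import ovf

data = np.zeros((2, 2, 1, 3), dtype='d')
data[0, 1, 0, :] = [3.0, 2.0, 1.0]

with ovf.ovf_file("sketch.ovf") as ovf_file:
    segment = ovf.ovf_segment(title="sketch", valuedim=3, n_cells=[2, 2, 1])
    if ovf_file.write_segment(segment, data) != ovf.OK:
        print("write_segment failed:", ovf_file.get_latest_message())

with ovf.ovf_file("sketch.ovf") as ovf_file:
    segment = ovf.ovf_segment()
    if ovf_file.read_segment_header(0, segment) == ovf.OK:
        shape = (segment.n_cells[0], segment.n_cells[1], segment.n_cells[2], 3)
        read_back = np.zeros(shape, dtype='d')
        ovf_file.read_segment_data(0, segment, read_back)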
test/IECoreHoudini/ToHoudiniPolygonsConverter.py | bradleyhenke/cortex | 2 | 6632993 | ##########################################################################
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import imath
import IECore
import IECoreScene
import IECoreHoudini
import unittest
import os
class TestToHoudiniPolygonsConverter( IECoreHoudini.TestCase ) :
__testScene = "test/converterTest.hip"
if hou.applicationVersion()[0] >= 16:
PointPositionAttribs = ['P']
else:
PointPositionAttribs = ['P', 'Pw']
def mesh( self ) :
vertsPerFace = IECore.IntVectorData( [ 4, 4, 4, 4, 4, 4 ] )
vertexIds = IECore.IntVectorData( [ 1, 5, 4, 0, 2, 6, 5, 1, 3, 7, 6, 2, 0, 4, 7, 3, 2, 1, 0, 3, 5, 6, 7, 4 ] )
mesh = IECoreScene.MeshPrimitive( vertsPerFace, vertexIds )
floatData = IECore.FloatData( 1.5 )
v2fData = IECore.V2fData( imath.V2f( 1.5, 2.5 ), IECore.GeometricData.Interpretation.Vector )
v3fData = IECore.V3fData( imath.V3f( 1.5, 2.5, 3.5 ) )
color3fData = IECore.Color3fData( imath.Color3f( 1.5, 2.5, 3.5 ) )
intData = IECore.IntData( 1 )
v2iData = IECore.V2iData( imath.V2i( 1, 2 ) )
v3iData = IECore.V3iData( imath.V3i( 1, 2, 3 ) )
stringData = IECore.StringData( "this is a string" )
intRange = range( 1, 25 )
floatVectorData = IECore.FloatVectorData( [ x+0.5 for x in intRange ] )
v2fVectorData = IECore.V2fVectorData( [ imath.V2f( x, x+0.5 ) for x in intRange ] )
v3fVectorData = IECore.V3fVectorData( [ imath.V3f( x, x+0.5, x+0.75 ) for x in intRange ], IECore.GeometricData.Interpretation.Normal )
color3fVectorData = IECore.Color3fVectorData( [ imath.Color3f( x, x+0.5, x+0.75 ) for x in intRange ] )
intVectorData = IECore.IntVectorData( intRange )
v2iVectorData = IECore.V2iVectorData( [ imath.V2i( x, -x ) for x in intRange ] )
v3iVectorData = IECore.V3iVectorData( [ imath.V3i( x, -x, x*2 ) for x in intRange ] )
stringVectorData = IECore.StringVectorData( [ "string number %06d!" % x for x in intRange ] )
detailInterpolation = IECoreScene.PrimitiveVariable.Interpolation.Constant
pointInterpolation = IECoreScene.PrimitiveVariable.Interpolation.Vertex
primitiveInterpolation = IECoreScene.PrimitiveVariable.Interpolation.Uniform
vertexInterpolation = IECoreScene.PrimitiveVariable.Interpolation.FaceVarying
# add all valid detail attrib types
mesh["floatDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, floatData )
mesh["v2fDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v2fData )
mesh["v3fDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v3fData )
mesh["color3fDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, color3fData )
mesh["intDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, intData )
mesh["v2iDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v2iData )
mesh["v3iDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v3iData )
mesh["stringDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, stringData )
# add all valid point attrib types
pData = IECore.V3fVectorData( [
imath.V3f( 0, 1, 2 ), imath.V3f( 1 ), imath.V3f( 2 ), imath.V3f( 3 ),
imath.V3f( 4 ), imath.V3f( 5 ), imath.V3f( 6 ), imath.V3f( 7 ),
], IECore.GeometricData.Interpretation.Point )
mesh["P"] = IECoreScene.PrimitiveVariable( pointInterpolation, pData )
mesh["floatPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, floatVectorData[:8] )
mesh["v2fPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v2fVectorData[:8] )
mesh["v3fPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v3fVectorData[:8] )
mesh["color3fPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, color3fVectorData[:8] )
mesh["intPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, intVectorData[:8] )
mesh["v2iPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v2iVectorData[:8] )
mesh["v3iPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v3iVectorData[:8] )
mesh["stringPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, stringVectorData[:8], IECore.IntVectorData( range( 0, 8 ) ) )
# add all valid primitive attrib types
mesh["floatPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, floatVectorData[:6] )
mesh["v2fPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v2fVectorData[:6] )
mesh["v3fPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v3fVectorData[:6] )
mesh["color3fPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, color3fVectorData[:6] )
mesh["intPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, intVectorData[:6] )
mesh["v2iPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v2iVectorData[:6] )
mesh["v3iPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v3iVectorData[:6] )
mesh["stringPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, stringVectorData[:6], IECore.IntVectorData( range( 0, 6 ) ) )
# add all valid vertex attrib types
mesh["floatVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, floatVectorData )
mesh["v2fVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v2fVectorData )
mesh["v3fVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v3fVectorData )
mesh["color3fVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, color3fVectorData )
mesh["intVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, intVectorData )
mesh["v2iVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v2iVectorData )
mesh["v3iVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v3iVectorData )
mesh["stringVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, stringVectorData, IECore.IntVectorData( range( 0, 24 ) ) )
return mesh
def emptySop( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
null = geo.createNode( "null" )
return null
def meshSop( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
box = geo.createNode( "box" )
facet = box.createOutputNode( "facet" )
facet.parm( "postnml" ).set(True)
return facet
def comparePrimAndSop( self, prim, sop ) :
geo = sop.geometry()
for key in [ "floatDetail", "intDetail", "stringDetail" ] :
self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail" ] :
self.assertEqual( tuple(prim[key].data.value), geo.attribValue( key ) )
sopPoints = geo.points()
for key in [ "floatPoint", "intPoint" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPoints[i].attribValue( key ) )
for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPoints[i].attribValue( key ) )
data = prim["stringPoint"].data
dataIndices = prim["stringPoint"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[i].attribValue( "stringPoint" ) )
sopPrims = geo.prims()
self.assertEqual( len(sopPrims), prim.numFaces() )
for key in [ "floatPrim", "intPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPrims[i].attribValue( key ) )
for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPrims[i].attribValue( key ) )
data = prim["stringPrim"].data
dataIndices = prim["stringPrim"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPrims[i].attribValue( "stringPrim" ) )
sopVerts = []
for i in range( 0, len(sopPrims) ) :
verts = list(sopPrims[i].vertices())
self.assertEqual( len(verts), prim.verticesPerFace[i] )
verts.reverse()
sopVerts.extend( verts )
self.assertEqual( len(sopVerts), prim.vertexIds.size() )
for i in range( 0, len(sopVerts) ) :
self.assertEqual( sopVerts[i].point().number(), prim.vertexIds[i] )
for key in [ "floatVert", "intVert" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopVerts[i].attribValue( key ) )
for key in [ "v2fVert", "v3fVert", "color3fVert", "v2iVert", "v3iVert" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopVerts[i].attribValue( key ) )
data = prim["stringVert"].data
dataIndices = prim["stringVert"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopVerts[i].attribValue( "stringVert" ) )
self.assertTrue( geo.findGlobalAttrib( "v2fDetail" ).isTransformedAsVector() )
self.assertTrue( geo.findPointAttrib( "v3fPoint" ).isTransformedAsNormal() )
self.assertTrue( geo.findPrimAttrib( "v3fPrim" ).isTransformedAsNormal() )
self.assertTrue( geo.findVertexAttrib( "v3fVert" ).isTransformedAsNormal() )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.verticesPerFace, prim.verticesPerFace )
self.assertEqual( result.vertexIds, prim.vertexIds )
self.assertEqual( result.keys(), prim.keys() )
for key in prim.keys() :
self.assertEqual( result[key], prim[key] )
self.assertEqual( result, prim )
		self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
		self.assertEqual( result["v2fDetail"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
		self.assertEqual( result["v3fPoint"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
		self.assertEqual( result["v3fPrim"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
		self.assertEqual( result["v3fVert"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
def comparePrimAndAppendedSop( self, prim, sop, origSopPrim, multipleConversions=False ) :
geo = sop.geometry()
		for key in [ "floatDetail", "intDetail", "stringDetail" ] :
self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail" ] :
self.assertEqual( tuple(prim[key].data.value), geo.attribValue( key ) )
sopPoints = geo.points()
numPoints = prim.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
origNumPoints = origSopPrim.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( len(sopPoints), origNumPoints + numPoints )
for key in [ "floatPoint", "intPoint" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ 0 ] * origNumPoints
for i in range( 0, origNumPoints ) :
self.assertEqual( defaultValue[i], sopPoints[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPoints[ origNumPoints + i ].attribValue( key ) )
for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint" ] :
data = prim[key].data
			if multipleConversions or key == "P" :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumPoints
for i in range( 0, origNumPoints ) :
self.assertEqual( tuple(defaultValue[i]), sopPoints[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPoints[ origNumPoints + i ].attribValue( key ) )
data = prim["stringPoint"].data
dataIndices = prim["stringPoint"].indices
if multipleConversions :
defaultData = origSopPrim["stringPoint"].data
defaultIndices = origSopPrim["stringPoint"].indices
for i in range( 0, origNumPoints ) :
val = "" if ( defaultIndices[i] >= defaultData.size() ) else defaultData[ defaultIndices[i] ]
self.assertEqual( val, sopPoints[ i ].attribValue( "stringPoint" ) )
else :
defaultValues = [ "" ] * origNumPoints
for i in range( 0, origNumPoints ) :
self.assertEqual( defaultValues[i], sopPoints[ i ].attribValue( "stringPoint" ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[ origNumPoints + i ].attribValue( "stringPoint" ) )
sopPrims = geo.prims()
origNumPrims = origSopPrim.numFaces()
self.assertEqual( len(sopPrims), origNumPrims + prim.numFaces() )
for key in [ "floatPrim", "intPrim" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ 0 ] * origNumPrims
for i in range( 0, origNumPrims ) :
self.assertEqual( defaultValue[i], sopPrims[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPrims[ origNumPrims + i ].attribValue( key ) )
for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumPrims
for i in range( 0, origNumPrims ) :
self.assertEqual( tuple(defaultValue[i]), sopPrims[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPrims[ origNumPrims + i ].attribValue( key ) )
data = prim["stringPrim"].data
dataIndices = prim["stringPrim"].indices
if multipleConversions :
defaultData = origSopPrim["stringPrim"].data
defaultIndices = origSopPrim["stringPrim"].indices
for i in range( 0, origNumPrims ) :
val = "" if ( defaultIndices[i] >= defaultData.size() ) else defaultData[ defaultIndices[i] ]
self.assertEqual( val, sopPrims[ i ].attribValue( "stringPrim" ) )
else :
defaultValues = [ "" ] * origNumPrims
for i in range( 0, origNumPrims ) :
self.assertEqual( defaultValues[i], sopPrims[ i ].attribValue( "stringPrim" ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPrims[ origNumPrims + i ].attribValue( "stringPrim" ) )
sopVerts = []
for i in range( 0, len(sopPrims) ) :
verts = list(sopPrims[i].vertices())
verts.reverse()
sopVerts.extend( verts )
			if i >= origNumPrims :
self.assertEqual( len(verts), prim.verticesPerFace[i-origNumPrims] )
origNumVerts = origSopPrim.vertexIds.size()
self.assertEqual( len(sopVerts), origNumVerts + prim.vertexIds.size() )
for i in range( 0, len(prim.vertexIds) ) :
self.assertEqual( sopVerts[origNumVerts+i].point().number() - origNumPoints, prim.vertexIds[i] )
for key in [ "floatVert", "intVert" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ 0 ] * origNumVerts
for i in range( 0, origNumVerts ) :
self.assertEqual( defaultValue[i], sopVerts[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopVerts[ origNumVerts + i ].attribValue( key ) )
for key in [ "v2fVert", "v3fVert", "color3fVert", "v2iVert", "v3iVert" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumVerts
for i in range( 0, origNumVerts ) :
self.assertEqual( tuple(defaultValue[i]), sopVerts[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopVerts[ origNumVerts + i ].attribValue( key ) )
data = prim["stringVert"].data
dataIndices = prim["stringVert"].indices
if multipleConversions :
defaultData = origSopPrim["stringVert"].data
defaultIndices = origSopPrim["stringVert"].indices
for i in range( 0, origNumVerts ) :
val = "" if ( defaultIndices[i] >= defaultData.size() ) else defaultData[ defaultIndices[i] ]
self.assertEqual( val, sopVerts[ i ].attribValue( "stringVert" ) )
else :
defaultValues = [ "" ] * origNumVerts
for i in range( 0, origNumVerts ) :
self.assertEqual( defaultValues[i], sopVerts[ i ].attribValue( "stringVert" ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopVerts[ origNumVerts + i ].attribValue( "stringVert" ) )
self.assertTrue( geo.findGlobalAttrib( "v2fDetail" ).isTransformedAsVector() )
self.assertTrue( geo.findPointAttrib( "v3fPoint" ).isTransformedAsNormal() )
self.assertTrue( geo.findPrimAttrib( "v3fPrim" ).isTransformedAsNormal() )
self.assertTrue( geo.findVertexAttrib( "v3fVert" ).isTransformedAsNormal() )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.verticesPerFace[origNumPrims:], prim.verticesPerFace )
for i in range( 0, len(prim.vertexIds) ) :
self.assertEqual( result.vertexIds[origNumVerts + i], prim.vertexIds[i] + origNumPoints )
for key in prim.keys() :
self.assertTrue( key in result.keys() )
self.assertTrue( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertTrue( result["v2fDetail"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
self.assertTrue( result["v3fPoint"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assertTrue( result["v3fPrim"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assertTrue( result["v3fVert"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
def testCreateConverter( self ) :
converter = IECoreHoudini.ToHoudiniPolygonsConverter( self.mesh() )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniPolygonsConverter ) ) )
def testFactory( self ) :
converter = IECoreHoudini.ToHoudiniGeometryConverter.create( self.mesh() )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniPolygonsConverter ) ) )
self.assertTrue( IECoreScene.TypeId.MeshPrimitive in IECoreHoudini.ToHoudiniGeometryConverter.supportedTypes() )
def testConversionIntoEmptySop( self ) :
mesh = self.mesh()
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.comparePrimAndSop( mesh, sop )
def testConversionIntoExistingSop( self ) :
mesh = self.mesh()
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertNotEqual( orig, mesh )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, False ) )
self.comparePrimAndSop( mesh, sop )
def testAppendingIntoExistingSop( self ) :
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
self.assertTrue( not sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
sop.setHardLocked( False )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints )
self.assertTrue( "floatDetail" not in result.keys() )
self.assertTrue( "floatPoint" not in result.keys() )
def testAppendingIntoLockedSop( self ) :
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
sop.setHardLocked( True )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
sop.setHardLocked( False )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints )
self.assertTrue( "floatDetail" not in result.keys() )
self.assertTrue( "floatPoint" not in result.keys() )
def testSaveLoad( self ) :
hou.hipFile.clear( suppress_save_prompt=True )
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
sopPath = sop.path()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
self.assertTrue( not sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
hou.hipFile.save( TestToHoudiniPolygonsConverter.__testScene )
hou.hipFile.clear( suppress_save_prompt=True )
hou.hipFile.load( TestToHoudiniPolygonsConverter.__testScene )
newSop = hou.node( sopPath )
self.assertTrue( newSop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, newSop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( newSop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
def testSaveLoadWithLockedSop( self ) :
hou.hipFile.clear( suppress_save_prompt=True )
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
sopPath = sop.path()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
sop.setHardLocked( True )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
hou.hipFile.save( TestToHoudiniPolygonsConverter.__testScene )
hou.hipFile.clear( suppress_save_prompt=True )
hou.hipFile.load( TestToHoudiniPolygonsConverter.__testScene )
newSop = hou.node( sopPath )
self.assertTrue( newSop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, newSop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( newSop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
def testMultipleConversions( self ) :
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
self.assertTrue( not sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, result, multipleConversions=True )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + 2*meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
self.assertEqual( result["P"].data[ origNumPoints + meshNumPoints + i ], mesh["P"].data[i] )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, result, multipleConversions=True )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + 3*meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
self.assertEqual( result["P"].data[ origNumPoints + meshNumPoints + i ], mesh["P"].data[i] )
self.assertEqual( result["P"].data[ origNumPoints + 2*meshNumPoints + i ], mesh["P"].data[i] )
def testObjectWasDeleted( self ) :
mesh = self.mesh()
sop = self.meshSop()
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop, False ) )
self.comparePrimAndSop( mesh, sop )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
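		# the converter keeps its own handle on the mesh, so conversion should still
		# succeed and reproduce the same geometry after the python reference is deleted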
del mesh
sop.setHardLocked( False )
self.assertNotEqual( IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert(), result )
self.assertTrue( converter.convert( sop, False ) )
self.assertEqual( IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert(), result )
def testWithUnacceptablePrimVars( self ) :
mesh = self.mesh()
mesh["badDetail"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.TransformationMatrixfData() )
mesh["badPoint"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
mesh["badPrim"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
mesh["badVert"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.assertTrue( "badDetail" not in [ x.name() for x in sop.geometry().globalAttribs() ] )
self.assertTrue( "badPoint" not in [ x.name() for x in sop.geometry().pointAttribs() ] )
self.assertTrue( "badPrim" not in [ x.name() for x in sop.geometry().primAttribs() ] )
self.assertTrue( "badVert" not in [ x.name() for x in sop.geometry().vertexAttribs() ] )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertNotEqual( result, mesh )
self.assertTrue( "badDetail" not in result )
self.assertTrue( "badPoint" not in result )
self.assertTrue( "badPrim" not in result )
self.assertTrue( "badVert" not in result )
del mesh["badDetail"]
del mesh["badPoint"]
del mesh["badPrim"]
del mesh["badVert"]
self.comparePrimAndSop( mesh, sop )
def testConvertingOverExistingAttribs( self ) :
mesh = self.mesh()
sop = self.emptySop()
detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm( "name" ).set( "floatDetail" )
detailAttr.parm( "class" ).set( 0 ) # detail
detailAttr.parm( "type" ).set( 0 ) # float
detailAttr.parm( "size" ).set( 1 ) # 1 element
detailAttr.parm( "value1" ).set( 123.456 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm( "name" ).set( "floatPoint" )
pointAttr.parm( "class" ).set( 2 ) # point
pointAttr.parm( "type" ).set( 0 ) # float
pointAttr.parm( "size" ).set( 1 ) # 1 element
pointAttr.parm( "value1" ).set( 123.456 )
primAttr = pointAttr.createOutputNode( "attribcreate", exact_type_name=True )
primAttr.parm( "name" ).set( "floatPrim" )
primAttr.parm( "class" ).set( 1 ) # prim
primAttr.parm( "type" ).set( 0 ) # float
primAttr.parm( "size" ).set( 1 ) # 1 element
primAttr.parm( "value1" ).set( 123.456 )
vertexAttr = primAttr.createOutputNode( "attribcreate", exact_type_name=True )
vertexAttr.parm( "name" ).set( "floatVert" )
vertexAttr.parm( "class" ).set( 3 ) # vertex
vertexAttr.parm( "type" ).set( 0 ) # float
vertexAttr.parm( "size" ).set( 1 ) # 1 element
vertexAttr.parm( "value1" ).set( 123.456 )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( vertexAttr ) )
self.comparePrimAndSop( mesh, vertexAttr )
def testConvertingOverExistingAttribsWithDifferentTypes( self ) :
mesh = self.mesh()
sop = self.emptySop()
detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm( "name" ).set( "floatDetail" )
detailAttr.parm( "class" ).set( 0 ) # detail
detailAttr.parm( "type" ).set( 1 ) # int
detailAttr.parm( "size" ).set( 3 ) # 3 elements
detailAttr.parm( "value1" ).set( 10 )
detailAttr.parm( "value2" ).set( 11 )
detailAttr.parm( "value3" ).set( 12 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm( "name" ).set( "floatPoint" )
pointAttr.parm( "class" ).set( 2 ) # point
pointAttr.parm( "type" ).set( 1 ) # int
pointAttr.parm( "size" ).set( 3 ) # 3 elements
pointAttr.parm( "value1" ).set( 10 )
pointAttr.parm( "value2" ).set( 11 )
pointAttr.parm( "value3" ).set( 12 )
primAttr = pointAttr.createOutputNode( "attribcreate", exact_type_name=True )
primAttr.parm( "name" ).set( "floatPrim" )
primAttr.parm( "class" ).set( 1 ) # prim
primAttr.parm( "type" ).set( 1 ) # int
primAttr.parm( "size" ).set( 3 ) # 3 elements
primAttr.parm( "value1" ).set( 10 )
primAttr.parm( "value2" ).set( 11 )
primAttr.parm( "value3" ).set( 12 )
vertexAttr = primAttr.createOutputNode( "attribcreate", exact_type_name=True )
vertexAttr.parm( "name" ).set( "floatVert" )
vertexAttr.parm( "class" ).set( 3 ) # vert
vertexAttr.parm( "type" ).set( 1 ) # int
vertexAttr.parm( "size" ).set( 3 ) # 3 elements
vertexAttr.parm( "value1" ).set( 10 )
vertexAttr.parm( "value2" ).set( 11 )
vertexAttr.parm( "value3" ).set( 12 )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( vertexAttr ) )
self.comparePrimAndSop( mesh, vertexAttr )
def testEmptyString( self ) :
mesh = self.mesh()
sop = self.emptySop()
mesh['stringPoint'].data[0] = ""
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
sopPoints = geo.points()
data = mesh["stringPoint"].data
dataIndices = mesh["stringPoint"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[i].attribValue( "stringPoint" ) )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.verticesPerFace, mesh.verticesPerFace )
self.assertEqual( result.vertexIds, mesh.vertexIds )
self.assertEqual( result.keys(), mesh.keys() )
self.assertEqual( result["stringPoint"], mesh["stringPoint"] )
def testName( self ) :
sop = self.emptySop()
mesh = self.mesh()
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
# unnamed unless we set the parameter
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
self.assertEqual( sop.geometry().findPrimAttrib( "name" ), None )
converter["name"].setTypedValue( "testMesh" )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "testMesh" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "testMesh" ]), mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
# blindData still works for backwards compatibility
mesh.blindData()["name"] = IECore.StringData( "blindMesh" )
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "blindMesh" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "blindMesh" ]), mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
# name parameter takes preference over blindData
converter["name"].setTypedValue( "testMesh" )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "testMesh" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "testMesh" ]), mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
def testAttributeFilter( self ) :
mesh = self.mesh()
sop = self.emptySop()
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), TestToHoudiniPolygonsConverter.PointPositionAttribs + ['color3fPoint', 'floatPoint', 'intPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'floatPrim', 'ieMeshInterpolation', 'intPrim', 'stringPrim', 'v2fPrim', 'v2iPrim', 'v3fPrim', 'v3iPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['color3fVert', 'floatVert', 'intVert', 'stringVert', 'v2fVert', 'v2iVert', 'v3fVert', 'v3iVert'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'floatDetail', 'intDetail', 'stringDetail', 'v2fDetail', 'v2iDetail', 'v3fDetail', 'v3iDetail'] )
converter.parameters()["attributeFilter"].setTypedValue( "P *3f*" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['color3fPoint', 'v3fPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'ieMeshInterpolation', 'v3fPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['color3fVert', 'v3fVert'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'v3fDetail'] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^*Detail ^int*" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), TestToHoudiniPolygonsConverter.PointPositionAttribs + ['color3fPoint', 'floatPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'floatPrim', 'ieMeshInterpolation', 'stringPrim', 'v2fPrim', 'v2iPrim', 'v3fPrim', 'v3iPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['color3fVert', 'floatVert', 'stringVert', 'v2fVert', 'v2iVert', 'v3fVert', 'v3iVert'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
# verify we can filter uvs
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
mesh = IECoreScene.MeshAlgo.triangulate( mesh )
IECoreScene.MeshNormalsOp()( input=mesh, copyInput=False )
mesh["Cs"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.V3fVectorData( [ imath.V3f( 1, 0, 0 ) ] * 6, IECore.GeometricData.Interpretation.Color ) )
mesh["width"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] * 4 ) )
mesh["Pref"] = mesh["P"]
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
converter.parameters()["attributeFilter"].setTypedValue( "*" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
# have to filter the source attrs
converter.parameters()["attributeFilter"].setTypedValue( "* ^uv ^pscale ^rest" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['Cd'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^width ^Pref" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
def testStandardAttributeConversion( self ) :
sop = self.emptySop()
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
mesh = IECoreScene.MeshAlgo.triangulate( mesh )
IECoreScene.MeshNormalsOp()( input=mesh, copyInput=False )
mesh["Cs"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.V3fVectorData( [ imath.V3f( 1, 0, 0 ) ] * 6, IECore.GeometricData.Interpretation.Color ) )
mesh["width"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] * 4 ) )
mesh["Pref"] = mesh["P"]
self.assertTrue( mesh.arePrimitiveVariablesValid() )
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
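		# with convertStandardAttributes enabled (the default), Cortex names are mapped to
		# Houdini conventions: Cs becomes Cd, width becomes pscale and Pref becomes rest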
self.assertItemsEqual( [ x.name() for x in geo.pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in geo.primAttribs() ]), ['ieMeshInterpolation'] )
self.assertEqual( sorted([ x.name() for x in geo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in geo.globalAttribs() ]), [] )
uvData = mesh["uv"].data
indices = mesh["uv"].indices
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[indices[i]][0] )
self.assertAlmostEqual( uvValues[1], uvData[indices[i]][1] )
i += 1
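		# disabling convertStandardAttributes preserves the original Cortex names instead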
converter["convertStandardAttributes"].setTypedValue( False )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
self.assertItemsEqual( [ x.name() for x in geo.pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'Pref', 'width'] )
self.assertEqual( sorted([ x.name() for x in geo.primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in geo.vertexAttribs() ]), ['Cs', 'uv'] )
self.assertEqual( sorted([ x.name() for x in geo.globalAttribs() ]), [] )
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[indices[i]][0] )
self.assertAlmostEqual( uvValues[1], uvData[indices[i]][1] )
i += 1
def testCannotTransformRest( self ) :
sop = self.emptySop()
mergeGeo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
mergeGeo.parm( "tx" ).set( 10 )
merge = mergeGeo.createNode( "object_merge" )
merge.parm( "xformtype" ).set( 1 )
merge.parm( "objpath1" ).set( sop.path() )
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
mesh = IECoreScene.MeshAlgo.triangulate( mesh )
IECoreScene.MeshNormalsOp()( input=mesh, copyInput=False )
mesh["Pref"] = mesh["P"]
prefData = mesh["Pref"].data
self.assertTrue( mesh.arePrimitiveVariablesValid() )
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
geo2 = merge.geometry()
i = 0
for point in geo.points() :
restValue = point.attribValue( "rest" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertTrue( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
i = 0
for point in geo2.points() :
restValue = point.attribValue( "rest" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertFalse( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
# Pref shouldn't transform either
converter["convertStandardAttributes"].setTypedValue( False )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
geo2 = merge.geometry()
i = 0
for point in geo.points() :
restValue = point.attribValue( "Pref" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertTrue( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
i = 0
for point in geo2.points() :
restValue = point.attribValue( "Pref" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertFalse( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
def testInterpolation( self ) :
mesh = self.mesh()
sop = self.emptySop()
self.assertEqual( mesh.interpolation, "linear" )
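		# linear meshes are tagged as "poly" and catmullClark meshes as "subdiv"
		# via the ieMeshInterpolation primitive attribute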
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.assertTrue( "ieMeshInterpolation" in [ x.name() for x in sop.geometry().primAttribs() ] )
attrib = sop.geometry().findPrimAttrib( "ieMeshInterpolation" )
for prim in sop.geometry().prims() :
self.assertEqual( prim.attribValue( attrib ), "poly" )
mesh.interpolation = "catmullClark"
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.assertTrue( "ieMeshInterpolation" in [ x.name() for x in sop.geometry().primAttribs() ] )
attrib = sop.geometry().findPrimAttrib( "ieMeshInterpolation" )
for prim in sop.geometry().prims() :
self.assertEqual( prim.attribValue( attrib ), "subdiv" )
def testExpandedUVRoundTrip( self ) :
mesh = IECore.Reader.create( "test/IECore/data/cobFiles/twoTrianglesWithSharedUVs.cob" ).read()
mesh["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, mesh["uv"].expandedData(), None )
mesh["uv"].indices = None
uvData = mesh["uv"].data
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
self.assertTrue( "uv" in [ x.name() for x in geo.vertexAttribs() ] )
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[i][0] )
self.assertAlmostEqual( uvValues[1], uvData[i][1] )
i += 1
converter = IECoreHoudini.FromHoudiniPolygonsConverter( sop )
result = converter.convert()
self.assertEqual( result["uv"].data.getInterpretation(), IECore.GeometricData.Interpretation.UV )
# we cannot guarantee to generate the same data when extracting from Houdini
# because we always generate indices, but we can generate correctly indexed data
self.assertEqual( result["uv"].data.size(), 4 )
self.assertEqual( result["uv"].indices.size(), 6 )
for i in range( 0, mesh.variableSize( mesh["uv"].interpolation ) ) :
self.assertEqual( mesh["uv"].data[i], result["uv"].data[ result["uv"].indices[i] ] )
def testIndexedUVRoundTrip( self ) :
mesh = IECore.Reader.create( "test/IECore/data/cobFiles/twoTrianglesWithSharedUVs.cob" ).read()
uvData = mesh["uv"].data
uvIndices = mesh["uv"].indices
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
self.assertTrue( "uv" in [ x.name() for x in geo.vertexAttribs() ] )
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[uvIndices[i]][0] )
self.assertAlmostEqual( uvValues[1], uvData[uvIndices[i]][1] )
i += 1
converter = IECoreHoudini.FromHoudiniPolygonsConverter( sop )
result = converter.convert()
self.assertEqual( result["uv"].data.getInterpretation(), IECore.GeometricData.Interpretation.UV )
# we cannot guarantee to generate the same indices when extracting from Houdini
# nor the same data, but we can generate correctly indexed data
self.assertEqual( result["uv"].data.size(), 4 )
self.assertEqual( result["uv"].indices.size(), 6 )
for i in range( 0, mesh.variableSize( mesh["uv"].interpolation ) ) :
self.assertEqual( mesh["uv"].data[ mesh["uv"].indices[i] ], result["uv"].data[ result["uv"].indices[i] ] )
def testCornersAndCreases( self ) :
mesh = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) )
# normals and UVs complicate the testing, and we don't need them to verify corners and creases
del mesh["N"]
del mesh["uv"]
cornerIds = [ 5 ]
cornerSharpnesses = [ 10.0 ]
mesh.setCorners( IECore.IntVectorData( cornerIds ), IECore.FloatVectorData( cornerSharpnesses ) )
creaseLengths = [ 3, 2 ]
creaseIds = [ 1, 2, 3, 4, 5 ] # note that these are vertex ids
creaseSharpnesses = [ 1, 5 ]
mesh.setCreases( IECore.IntVectorData( creaseLengths ), IECore.IntVectorData( creaseIds ), IECore.FloatVectorData( creaseSharpnesses ) )
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
self.assertTrue( "cornerweight" in [ x.name() for x in geo.pointAttribs() ] )
self.assertTrue( "creaseweight" in [ x.name() for x in geo.vertexAttribs() ] )
# test corners
cornerWeight = geo.findPointAttrib( "cornerweight" )
for point in geo.points() :
sharpness = 0.0
if point.number() in cornerIds :
sharpness = cornerSharpnesses[ cornerIds.index( point.number() ) ]
self.assertEqual( point.attribValue( cornerWeight ), sharpness )
# test creases
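		# Houdini stores crease sharpness per vertex, so each creased edge contributes
		# an entry for the vertices on either side of it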
expectedSharpnesses = [ 0 ] * 24
# edge 1-2
expectedSharpnesses[1] = 1
expectedSharpnesses[2] = 1
# edge 2-3
expectedSharpnesses[6] = 1
expectedSharpnesses[18] = 1
# edge 4-5
expectedSharpnesses[4] = 5
expectedSharpnesses[10] = 5
self.assertEqual( list(geo.vertexFloatAttribValues( "creaseweight" )), expectedSharpnesses )
# make sure it round trips well enough
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.cornerIds(), mesh.cornerIds() )
self.assertEqual( result.cornerSharpnesses(), mesh.cornerSharpnesses() )
self.assertEqual( result.creaseLengths(), IECore.IntVectorData( [ 2, 2, 2 ] ) )
self.assertEqual( result.creaseIds(), IECore.IntVectorData( [ 2, 3, 1, 2, 4, 5 ] ) )
self.assertEqual( result.creaseSharpnesses(), IECore.FloatVectorData( [ 1, 1, 5 ] ) )
# if we re-align result creases, everything else is an exact match
mesh.setCreases( result.creaseLengths(), result.creaseIds(), result.creaseSharpnesses() )
self.assertEqual( result, mesh )
def tearDown( self ) :
if os.path.isfile( TestToHoudiniPolygonsConverter.__testScene ) :
os.remove( TestToHoudiniPolygonsConverter.__testScene )
if __name__ == "__main__":
unittest.main()
| ##########################################################################
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import imath
import IECore
import IECoreScene
import IECoreHoudini
import unittest
import os
class TestToHoudiniPolygonsConverter( IECoreHoudini.TestCase ) :
__testScene = "test/converterTest.hip"
if hou.applicationVersion()[0] >= 16:
PointPositionAttribs = ['P']
else:
PointPositionAttribs = ['P', 'Pw']
def mesh( self ) :
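		# builds a closed 6-quad, 8-point mesh carrying a primitive variable of every
		# supported data type at each interpolation (detail, point, prim and vertex)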
vertsPerFace = IECore.IntVectorData( [ 4, 4, 4, 4, 4, 4 ] )
vertexIds = IECore.IntVectorData( [ 1, 5, 4, 0, 2, 6, 5, 1, 3, 7, 6, 2, 0, 4, 7, 3, 2, 1, 0, 3, 5, 6, 7, 4 ] )
mesh = IECoreScene.MeshPrimitive( vertsPerFace, vertexIds )
floatData = IECore.FloatData( 1.5 )
v2fData = IECore.V2fData( imath.V2f( 1.5, 2.5 ), IECore.GeometricData.Interpretation.Vector )
v3fData = IECore.V3fData( imath.V3f( 1.5, 2.5, 3.5 ) )
color3fData = IECore.Color3fData( imath.Color3f( 1.5, 2.5, 3.5 ) )
intData = IECore.IntData( 1 )
v2iData = IECore.V2iData( imath.V2i( 1, 2 ) )
v3iData = IECore.V3iData( imath.V3i( 1, 2, 3 ) )
stringData = IECore.StringData( "this is a string" )
intRange = range( 1, 25 )
floatVectorData = IECore.FloatVectorData( [ x+0.5 for x in intRange ] )
v2fVectorData = IECore.V2fVectorData( [ imath.V2f( x, x+0.5 ) for x in intRange ] )
v3fVectorData = IECore.V3fVectorData( [ imath.V3f( x, x+0.5, x+0.75 ) for x in intRange ], IECore.GeometricData.Interpretation.Normal )
color3fVectorData = IECore.Color3fVectorData( [ imath.Color3f( x, x+0.5, x+0.75 ) for x in intRange ] )
intVectorData = IECore.IntVectorData( intRange )
v2iVectorData = IECore.V2iVectorData( [ imath.V2i( x, -x ) for x in intRange ] )
v3iVectorData = IECore.V3iVectorData( [ imath.V3i( x, -x, x*2 ) for x in intRange ] )
stringVectorData = IECore.StringVectorData( [ "string number %06d!" % x for x in intRange ] )
detailInterpolation = IECoreScene.PrimitiveVariable.Interpolation.Constant
pointInterpolation = IECoreScene.PrimitiveVariable.Interpolation.Vertex
primitiveInterpolation = IECoreScene.PrimitiveVariable.Interpolation.Uniform
vertexInterpolation = IECoreScene.PrimitiveVariable.Interpolation.FaceVarying
# add all valid detail attrib types
mesh["floatDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, floatData )
mesh["v2fDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v2fData )
mesh["v3fDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v3fData )
mesh["color3fDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, color3fData )
mesh["intDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, intData )
mesh["v2iDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v2iData )
mesh["v3iDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, v3iData )
mesh["stringDetail"] = IECoreScene.PrimitiveVariable( detailInterpolation, stringData )
# add all valid point attrib types
pData = IECore.V3fVectorData( [
imath.V3f( 0, 1, 2 ), imath.V3f( 1 ), imath.V3f( 2 ), imath.V3f( 3 ),
imath.V3f( 4 ), imath.V3f( 5 ), imath.V3f( 6 ), imath.V3f( 7 ),
], IECore.GeometricData.Interpretation.Point )
mesh["P"] = IECoreScene.PrimitiveVariable( pointInterpolation, pData )
mesh["floatPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, floatVectorData[:8] )
mesh["v2fPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v2fVectorData[:8] )
mesh["v3fPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v3fVectorData[:8] )
mesh["color3fPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, color3fVectorData[:8] )
mesh["intPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, intVectorData[:8] )
mesh["v2iPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v2iVectorData[:8] )
mesh["v3iPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, v3iVectorData[:8] )
mesh["stringPoint"] = IECoreScene.PrimitiveVariable( pointInterpolation, stringVectorData[:8], IECore.IntVectorData( range( 0, 8 ) ) )
# add all valid primitive attrib types
mesh["floatPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, floatVectorData[:6] )
mesh["v2fPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v2fVectorData[:6] )
mesh["v3fPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v3fVectorData[:6] )
mesh["color3fPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, color3fVectorData[:6] )
mesh["intPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, intVectorData[:6] )
mesh["v2iPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v2iVectorData[:6] )
mesh["v3iPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, v3iVectorData[:6] )
mesh["stringPrim"] = IECoreScene.PrimitiveVariable( primitiveInterpolation, stringVectorData[:6], IECore.IntVectorData( range( 0, 6 ) ) )
# add all valid vertex attrib types
mesh["floatVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, floatVectorData )
mesh["v2fVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v2fVectorData )
mesh["v3fVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v3fVectorData )
mesh["color3fVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, color3fVectorData )
mesh["intVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, intVectorData )
mesh["v2iVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v2iVectorData )
mesh["v3iVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, v3iVectorData )
mesh["stringVert"] = IECoreScene.PrimitiveVariable( vertexInterpolation, stringVectorData, IECore.IntVectorData( range( 0, 24 ) ) )
return mesh
def emptySop( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
null = geo.createNode( "null" )
return null
def meshSop( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
box = geo.createNode( "box" )
facet = box.createOutputNode( "facet" )
facet.parm( "postnml" ).set(True)
return facet
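	# Verifies that every primitive variable was converted onto the matching Houdini
	# attribute class, and that converting the SOP back to Cortex reproduces the mesh.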
def comparePrimAndSop( self, prim, sop ) :
geo = sop.geometry()
for key in [ "floatDetail", "intDetail", "stringDetail" ] :
self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail" ] :
self.assertEqual( tuple(prim[key].data.value), geo.attribValue( key ) )
sopPoints = geo.points()
for key in [ "floatPoint", "intPoint" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPoints[i].attribValue( key ) )
for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPoints[i].attribValue( key ) )
data = prim["stringPoint"].data
dataIndices = prim["stringPoint"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[i].attribValue( "stringPoint" ) )
sopPrims = geo.prims()
self.assertEqual( len(sopPrims), prim.numFaces() )
for key in [ "floatPrim", "intPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPrims[i].attribValue( key ) )
for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPrims[i].attribValue( key ) )
data = prim["stringPrim"].data
dataIndices = prim["stringPrim"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPrims[i].attribValue( "stringPrim" ) )
sopVerts = []
for i in range( 0, len(sopPrims) ) :
verts = list(sopPrims[i].vertices())
self.assertEqual( len(verts), prim.verticesPerFace[i] )
verts.reverse()
sopVerts.extend( verts )
self.assertEqual( len(sopVerts), prim.vertexIds.size() )
for i in range( 0, len(sopVerts) ) :
self.assertEqual( sopVerts[i].point().number(), prim.vertexIds[i] )
for key in [ "floatVert", "intVert" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopVerts[i].attribValue( key ) )
for key in [ "v2fVert", "v3fVert", "color3fVert", "v2iVert", "v3iVert" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopVerts[i].attribValue( key ) )
data = prim["stringVert"].data
dataIndices = prim["stringVert"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopVerts[i].attribValue( "stringVert" ) )
self.assertTrue( geo.findGlobalAttrib( "v2fDetail" ).isTransformedAsVector() )
self.assertTrue( geo.findPointAttrib( "v3fPoint" ).isTransformedAsNormal() )
self.assertTrue( geo.findPrimAttrib( "v3fPrim" ).isTransformedAsNormal() )
self.assertTrue( geo.findVertexAttrib( "v3fVert" ).isTransformedAsNormal() )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.verticesPerFace, prim.verticesPerFace )
self.assertEqual( result.vertexIds, prim.vertexIds )
self.assertEqual( result.keys(), prim.keys() )
for key in prim.keys() :
self.assertEqual( result[key], prim[key] )
self.assertEqual( result, prim )
self.assertTrue( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertTrue( result["v2fDetail"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
self.assertTrue( result["v3fPoint"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assertTrue( result["v3fPrim"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assertTrue( result["v3fVert"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
def comparePrimAndAppendedSop( self, prim, sop, origSopPrim, multipleConversions=False ) :
geo = sop.geometry()
for key in [ "floatDetail", "intDetail", "stringDetail", "stringDetail" ] :
self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail" ] :
self.assertEqual( tuple(prim[key].data.value), geo.attribValue( key ) )
sopPoints = geo.points()
numPoints = prim.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
origNumPoints = origSopPrim.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( len(sopPoints), origNumPoints + numPoints )
for key in [ "floatPoint", "intPoint" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ 0 ] * origNumPoints
for i in range( 0, origNumPoints ) :
self.assertEqual( defaultValue[i], sopPoints[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPoints[ origNumPoints + i ].attribValue( key ) )
for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint" ] :
data = prim[key].data
			if multipleConversions or key == "P" :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumPoints
for i in range( 0, origNumPoints ) :
self.assertEqual( tuple(defaultValue[i]), sopPoints[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPoints[ origNumPoints + i ].attribValue( key ) )
data = prim["stringPoint"].data
dataIndices = prim["stringPoint"].indices
if multipleConversions :
defaultData = origSopPrim["stringPoint"].data
defaultIndices = origSopPrim["stringPoint"].indices
for i in range( 0, origNumPoints ) :
val = "" if ( defaultIndices[i] >= defaultData.size() ) else defaultData[ defaultIndices[i] ]
self.assertEqual( val, sopPoints[ i ].attribValue( "stringPoint" ) )
else :
defaultValues = [ "" ] * origNumPoints
for i in range( 0, origNumPoints ) :
self.assertEqual( defaultValues[i], sopPoints[ i ].attribValue( "stringPoint" ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[ origNumPoints + i ].attribValue( "stringPoint" ) )
sopPrims = geo.prims()
origNumPrims = origSopPrim.numFaces()
self.assertEqual( len(sopPrims), origNumPrims + prim.numFaces() )
for key in [ "floatPrim", "intPrim" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ 0 ] * origNumPrims
for i in range( 0, origNumPrims ) :
self.assertEqual( defaultValue[i], sopPrims[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPrims[ origNumPrims + i ].attribValue( key ) )
for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumPrims
for i in range( 0, origNumPrims ) :
self.assertEqual( tuple(defaultValue[i]), sopPrims[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopPrims[ origNumPrims + i ].attribValue( key ) )
data = prim["stringPrim"].data
dataIndices = prim["stringPrim"].indices
if multipleConversions :
defaultData = origSopPrim["stringPrim"].data
defaultIndices = origSopPrim["stringPrim"].indices
for i in range( 0, origNumPrims ) :
val = "" if ( defaultIndices[i] >= defaultData.size() ) else defaultData[ defaultIndices[i] ]
self.assertEqual( val, sopPrims[ i ].attribValue( "stringPrim" ) )
else :
defaultValues = [ "" ] * origNumPrims
for i in range( 0, origNumPrims ) :
self.assertEqual( defaultValues[i], sopPrims[ i ].attribValue( "stringPrim" ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPrims[ origNumPrims + i ].attribValue( "stringPrim" ) )
sopVerts = []
for i in range( 0, len(sopPrims) ) :
verts = list(sopPrims[i].vertices())
verts.reverse()
sopVerts.extend( verts )
			if i >= origNumPrims :
self.assertEqual( len(verts), prim.verticesPerFace[i-origNumPrims] )
origNumVerts = origSopPrim.vertexIds.size()
self.assertEqual( len(sopVerts), origNumVerts + prim.vertexIds.size() )
for i in range( 0, len(prim.vertexIds) ) :
self.assertEqual( sopVerts[origNumVerts+i].point().number() - origNumPoints, prim.vertexIds[i] )
for key in [ "floatVert", "intVert" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ 0 ] * origNumVerts
for i in range( 0, origNumVerts ) :
self.assertEqual( defaultValue[i], sopVerts[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopVerts[ origNumVerts + i ].attribValue( key ) )
for key in [ "v2fVert", "v3fVert", "color3fVert", "v2iVert", "v3iVert" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumVerts
for i in range( 0, origNumVerts ) :
self.assertEqual( tuple(defaultValue[i]), sopVerts[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( tuple(data[i]), sopVerts[ origNumVerts + i ].attribValue( key ) )
data = prim["stringVert"].data
dataIndices = prim["stringVert"].indices
if multipleConversions :
defaultData = origSopPrim["stringVert"].data
defaultIndices = origSopPrim["stringVert"].indices
for i in range( 0, origNumVerts ) :
val = "" if ( defaultIndices[i] >= defaultData.size() ) else defaultData[ defaultIndices[i] ]
self.assertEqual( val, sopVerts[ i ].attribValue( "stringVert" ) )
else :
defaultValues = [ "" ] * origNumVerts
for i in range( 0, origNumVerts ) :
self.assertEqual( defaultValues[i], sopVerts[ i ].attribValue( "stringVert" ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopVerts[ origNumVerts + i ].attribValue( "stringVert" ) )
self.assertTrue( geo.findGlobalAttrib( "v2fDetail" ).isTransformedAsVector() )
self.assertTrue( geo.findPointAttrib( "v3fPoint" ).isTransformedAsNormal() )
self.assertTrue( geo.findPrimAttrib( "v3fPrim" ).isTransformedAsNormal() )
self.assertTrue( geo.findVertexAttrib( "v3fVert" ).isTransformedAsNormal() )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.verticesPerFace[origNumPrims:], prim.verticesPerFace )
for i in range( 0, len(prim.vertexIds) ) :
self.assertEqual( result.vertexIds[origNumVerts + i], prim.vertexIds[i] + origNumPoints )
for key in prim.keys() :
self.assertTrue( key in result.keys() )
self.assertTrue( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertTrue( result["v2fDetail"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
self.assertTrue( result["v3fPoint"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assertTrue( result["v3fPrim"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assertTrue( result["v3fVert"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
def testCreateConverter( self ) :
converter = IECoreHoudini.ToHoudiniPolygonsConverter( self.mesh() )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniPolygonsConverter ) ) )
def testFactory( self ) :
converter = IECoreHoudini.ToHoudiniGeometryConverter.create( self.mesh() )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniPolygonsConverter ) ) )
self.assertTrue( IECoreScene.TypeId.MeshPrimitive in IECoreHoudini.ToHoudiniGeometryConverter.supportedTypes() )
def testConversionIntoEmptySop( self ) :
mesh = self.mesh()
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.comparePrimAndSop( mesh, sop )
def testConversionIntoExistingSop( self ) :
mesh = self.mesh()
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertNotEqual( orig, mesh )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, False ) )
self.comparePrimAndSop( mesh, sop )
def testAppendingIntoExistingSop( self ) :
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
self.assertTrue( not sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
sop.setHardLocked( False )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints )
self.assertTrue( "floatDetail" not in result.keys() )
self.assertTrue( "floatPoint" not in result.keys() )
def testAppendingIntoLockedSop( self ) :
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
sop.setHardLocked( True )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
sop.setHardLocked( False )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints )
self.assertTrue( "floatDetail" not in result.keys() )
self.assertTrue( "floatPoint" not in result.keys() )
def testSaveLoad( self ) :
hou.hipFile.clear( suppress_save_prompt=True )
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
sopPath = sop.path()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
self.assertTrue( not sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
hou.hipFile.save( TestToHoudiniPolygonsConverter.__testScene )
hou.hipFile.clear( suppress_save_prompt=True )
hou.hipFile.load( TestToHoudiniPolygonsConverter.__testScene )
newSop = hou.node( sopPath )
self.assertTrue( newSop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, newSop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( newSop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
def testSaveLoadWithLockedSop( self ) :
hou.hipFile.clear( suppress_save_prompt=True )
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
sopPath = sop.path()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
sop.setHardLocked( True )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
hou.hipFile.save( TestToHoudiniPolygonsConverter.__testScene )
hou.hipFile.clear( suppress_save_prompt=True )
hou.hipFile.load( TestToHoudiniPolygonsConverter.__testScene )
newSop = hou.node( sopPath )
self.assertTrue( newSop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, newSop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( newSop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
def testMultipleConversions( self ) :
mesh = self.mesh()
meshNumPoints = mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
sop = self.meshSop()
orig = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
origNumPoints = orig.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertNotEqual( orig, mesh )
self.assertTrue( not sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, orig )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, result, multipleConversions=True )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + 2*meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
self.assertEqual( result["P"].data[ origNumPoints + meshNumPoints + i ], mesh["P"].data[i] )
self.assertTrue( sop.isHardLocked() )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop, True ) )
self.assertTrue( sop.isHardLocked() )
self.comparePrimAndAppendedSop( mesh, sop, result, multipleConversions=True )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
resultNumPoints = result.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( resultNumPoints, origNumPoints + 3*meshNumPoints )
for i in range( 0, mesh["P"].data.size() ) :
self.assertEqual( result["P"].data[ origNumPoints + i ], mesh["P"].data[i] )
self.assertEqual( result["P"].data[ origNumPoints + meshNumPoints + i ], mesh["P"].data[i] )
self.assertEqual( result["P"].data[ origNumPoints + 2*meshNumPoints + i ], mesh["P"].data[i] )
def testObjectWasDeleted( self ) :
mesh = self.mesh()
sop = self.meshSop()
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop, False ) )
self.comparePrimAndSop( mesh, sop )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
del mesh
sop.setHardLocked( False )
self.assertNotEqual( IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert(), result )
self.assertTrue( converter.convert( sop, False ) )
self.assertEqual( IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert(), result )
def testWithUnacceptablePrimVars( self ) :
mesh = self.mesh()
mesh["badDetail"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.TransformationMatrixfData() )
mesh["badPoint"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
mesh["badPrim"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
mesh["badVert"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.assertTrue( "badDetail" not in [ x.name() for x in sop.geometry().globalAttribs() ] )
self.assertTrue( "badPoint" not in [ x.name() for x in sop.geometry().pointAttribs() ] )
self.assertTrue( "badPrim" not in [ x.name() for x in sop.geometry().primAttribs() ] )
self.assertTrue( "badVert" not in [ x.name() for x in sop.geometry().vertexAttribs() ] )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertNotEqual( result, mesh )
self.assertTrue( "badDetail" not in result )
self.assertTrue( "badPoint" not in result )
self.assertTrue( "badPrim" not in result )
self.assertTrue( "badVert" not in result )
del mesh["badDetail"]
del mesh["badPoint"]
del mesh["badPrim"]
del mesh["badVert"]
self.comparePrimAndSop( mesh, sop )
def testConvertingOverExistingAttribs( self ) :
mesh = self.mesh()
sop = self.emptySop()
detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm( "name" ).set( "floatDetail" )
detailAttr.parm( "class" ).set( 0 ) # detail
detailAttr.parm( "type" ).set( 0 ) # float
detailAttr.parm( "size" ).set( 1 ) # 1 element
detailAttr.parm( "value1" ).set( 123.456 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm( "name" ).set( "floatPoint" )
pointAttr.parm( "class" ).set( 2 ) # point
pointAttr.parm( "type" ).set( 0 ) # float
pointAttr.parm( "size" ).set( 1 ) # 1 element
pointAttr.parm( "value1" ).set( 123.456 )
primAttr = pointAttr.createOutputNode( "attribcreate", exact_type_name=True )
primAttr.parm( "name" ).set( "floatPrim" )
primAttr.parm( "class" ).set( 1 ) # prim
primAttr.parm( "type" ).set( 0 ) # float
primAttr.parm( "size" ).set( 1 ) # 1 element
primAttr.parm( "value1" ).set( 123.456 )
vertexAttr = primAttr.createOutputNode( "attribcreate", exact_type_name=True )
vertexAttr.parm( "name" ).set( "floatVert" )
vertexAttr.parm( "class" ).set( 3 ) # vertex
vertexAttr.parm( "type" ).set( 0 ) # float
vertexAttr.parm( "size" ).set( 1 ) # 1 element
vertexAttr.parm( "value1" ).set( 123.456 )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( vertexAttr ) )
self.comparePrimAndSop( mesh, vertexAttr )
def testConvertingOverExistingAttribsWithDifferentTypes( self ) :
mesh = self.mesh()
sop = self.emptySop()
detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm( "name" ).set( "floatDetail" )
detailAttr.parm( "class" ).set( 0 ) # detail
detailAttr.parm( "type" ).set( 1 ) # int
detailAttr.parm( "size" ).set( 3 ) # 3 elements
detailAttr.parm( "value1" ).set( 10 )
detailAttr.parm( "value2" ).set( 11 )
detailAttr.parm( "value3" ).set( 12 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm( "name" ).set( "floatPoint" )
pointAttr.parm( "class" ).set( 2 ) # point
pointAttr.parm( "type" ).set( 1 ) # int
pointAttr.parm( "size" ).set( 3 ) # 3 elements
pointAttr.parm( "value1" ).set( 10 )
pointAttr.parm( "value2" ).set( 11 )
pointAttr.parm( "value3" ).set( 12 )
primAttr = pointAttr.createOutputNode( "attribcreate", exact_type_name=True )
primAttr.parm( "name" ).set( "floatPrim" )
primAttr.parm( "class" ).set( 1 ) # prim
primAttr.parm( "type" ).set( 1 ) # int
primAttr.parm( "size" ).set( 3 ) # 3 elements
primAttr.parm( "value1" ).set( 10 )
primAttr.parm( "value2" ).set( 11 )
primAttr.parm( "value3" ).set( 12 )
vertexAttr = primAttr.createOutputNode( "attribcreate", exact_type_name=True )
vertexAttr.parm( "name" ).set( "floatVert" )
vertexAttr.parm( "class" ).set( 3 ) # vert
vertexAttr.parm( "type" ).set( 1 ) # int
vertexAttr.parm( "size" ).set( 3 ) # 3 elements
vertexAttr.parm( "value1" ).set( 10 )
vertexAttr.parm( "value2" ).set( 11 )
vertexAttr.parm( "value3" ).set( 12 )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( vertexAttr ) )
self.comparePrimAndSop( mesh, vertexAttr )
def testEmptyString( self ) :
mesh = self.mesh()
sop = self.emptySop()
mesh['stringPoint'].data[0] = ""
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
sopPoints = geo.points()
data = mesh["stringPoint"].data
dataIndices = mesh["stringPoint"].indices
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[i].attribValue( "stringPoint" ) )
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.verticesPerFace, mesh.verticesPerFace )
self.assertEqual( result.vertexIds, mesh.vertexIds )
self.assertEqual( result.keys(), mesh.keys() )
self.assertEqual( result["stringPoint"], mesh["stringPoint"] )
def testName( self ) :
sop = self.emptySop()
mesh = self.mesh()
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
# unnamed unless we set the parameter
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
self.assertEqual( sop.geometry().findPrimAttrib( "name" ), None )
converter["name"].setTypedValue( "testMesh" )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "testMesh" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "testMesh" ]), mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
# blindData still works for backwards compatibility
mesh.blindData()["name"] = IECore.StringData( "blindMesh" )
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "blindMesh" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "blindMesh" ]), mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
# name parameter takes preference over blindData
converter["name"].setTypedValue( "testMesh" )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "testMesh" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "testMesh" ]), mesh.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
def testAttributeFilter( self ) :
mesh = self.mesh()
sop = self.emptySop()
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), TestToHoudiniPolygonsConverter.PointPositionAttribs + ['color3fPoint', 'floatPoint', 'intPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'floatPrim', 'ieMeshInterpolation', 'intPrim', 'stringPrim', 'v2fPrim', 'v2iPrim', 'v3fPrim', 'v3iPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['color3fVert', 'floatVert', 'intVert', 'stringVert', 'v2fVert', 'v2iVert', 'v3fVert', 'v3iVert'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'floatDetail', 'intDetail', 'stringDetail', 'v2fDetail', 'v2iDetail', 'v3fDetail', 'v3iDetail'] )
converter.parameters()["attributeFilter"].setTypedValue( "P *3f*" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['color3fPoint', 'v3fPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'ieMeshInterpolation', 'v3fPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['color3fVert', 'v3fVert'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'v3fDetail'] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^*Detail ^int*" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), TestToHoudiniPolygonsConverter.PointPositionAttribs + ['color3fPoint', 'floatPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'floatPrim', 'ieMeshInterpolation', 'stringPrim', 'v2fPrim', 'v2iPrim', 'v3fPrim', 'v3iPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['color3fVert', 'floatVert', 'stringVert', 'v2fVert', 'v2iVert', 'v3fVert', 'v3iVert'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
# verify we can filter uvs
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
mesh = IECoreScene.MeshAlgo.triangulate( mesh )
IECoreScene.MeshNormalsOp()( input=mesh, copyInput=False )
mesh["Cs"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.V3fVectorData( [ imath.V3f( 1, 0, 0 ) ] * 6, IECore.GeometricData.Interpretation.Color ) )
mesh["width"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] * 4 ) )
mesh["Pref"] = mesh["P"]
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
converter.parameters()["attributeFilter"].setTypedValue( "*" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
# have to filter the source attrs
converter.parameters()["attributeFilter"].setTypedValue( "* ^uv ^pscale ^rest" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['Cd'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^width ^Pref" )
self.assertTrue( converter.convert( sop ) )
self.assertItemsEqual( [ x.name() for x in sop.geometry().pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
def testStandardAttributeConversion( self ) :
sop = self.emptySop()
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
mesh = IECoreScene.MeshAlgo.triangulate( mesh )
IECoreScene.MeshNormalsOp()( input=mesh, copyInput=False )
mesh["Cs"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.V3fVectorData( [ imath.V3f( 1, 0, 0 ) ] * 6, IECore.GeometricData.Interpretation.Color ) )
mesh["width"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] * 4 ) )
mesh["Pref"] = mesh["P"]
self.assertTrue( mesh.arePrimitiveVariablesValid() )
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
self.assertItemsEqual( [ x.name() for x in geo.pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in geo.primAttribs() ]), ['ieMeshInterpolation'] )
self.assertEqual( sorted([ x.name() for x in geo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in geo.globalAttribs() ]), [] )
uvData = mesh["uv"].data
indices = mesh["uv"].indices
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[indices[i]][0] )
self.assertAlmostEqual( uvValues[1], uvData[indices[i]][1] )
i += 1
converter["convertStandardAttributes"].setTypedValue( False )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
self.assertItemsEqual( [ x.name() for x in geo.pointAttribs() ], TestToHoudiniPolygonsConverter.PointPositionAttribs + ['N', 'Pref', 'width'] )
self.assertEqual( sorted([ x.name() for x in geo.primAttribs() ]), ['ieMeshInterpolation', ] )
self.assertEqual( sorted([ x.name() for x in geo.vertexAttribs() ]), ['Cs', 'uv'] )
self.assertEqual( sorted([ x.name() for x in geo.globalAttribs() ]), [] )
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[indices[i]][0] )
self.assertAlmostEqual( uvValues[1], uvData[indices[i]][1] )
i += 1
def testCannotTransformRest( self ) :
sop = self.emptySop()
mergeGeo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
mergeGeo.parm( "tx" ).set( 10 )
merge = mergeGeo.createNode( "object_merge" )
merge.parm( "xformtype" ).set( 1 )
merge.parm( "objpath1" ).set( sop.path() )
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) )
mesh = IECoreScene.MeshAlgo.triangulate( mesh )
IECoreScene.MeshNormalsOp()( input=mesh, copyInput=False )
mesh["Pref"] = mesh["P"]
prefData = mesh["Pref"].data
self.assertTrue( mesh.arePrimitiveVariablesValid() )
converter = IECoreHoudini.ToHoudiniPolygonsConverter( mesh )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
geo2 = merge.geometry()
i = 0
for point in geo.points() :
restValue = point.attribValue( "rest" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertTrue( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
i = 0
for point in geo2.points() :
restValue = point.attribValue( "rest" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertFalse( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
# Pref shouldn't transform either
converter["convertStandardAttributes"].setTypedValue( False )
self.assertTrue( converter.convert( sop ) )
geo = sop.geometry()
geo2 = merge.geometry()
i = 0
for point in geo.points() :
restValue = point.attribValue( "Pref" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertTrue( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
i = 0
for point in geo2.points() :
restValue = point.attribValue( "Pref" )
self.assertAlmostEqual( imath.V3f( restValue[0], restValue[1], restValue[2] ), prefData[i] )
self.assertFalse( point.position().isAlmostEqual( hou.Vector3(restValue) ) )
i += 1
def testInterpolation( self ) :
mesh = self.mesh()
sop = self.emptySop()
self.assertEqual( mesh.interpolation, "linear" )
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.assertTrue( "ieMeshInterpolation" in [ x.name() for x in sop.geometry().primAttribs() ] )
attrib = sop.geometry().findPrimAttrib( "ieMeshInterpolation" )
for prim in sop.geometry().prims() :
self.assertEqual( prim.attribValue( attrib ), "poly" )
mesh.interpolation = "catmullClark"
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
self.assertTrue( "ieMeshInterpolation" in [ x.name() for x in sop.geometry().primAttribs() ] )
attrib = sop.geometry().findPrimAttrib( "ieMeshInterpolation" )
for prim in sop.geometry().prims() :
self.assertEqual( prim.attribValue( attrib ), "subdiv" )
def testExpandedUVRoundTrip( self ) :
mesh = IECore.Reader.create( "test/IECore/data/cobFiles/twoTrianglesWithSharedUVs.cob" ).read()
mesh["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, mesh["uv"].expandedData(), None )
mesh["uv"].indices = None
uvData = mesh["uv"].data
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
self.assertTrue( "uv" in [ x.name() for x in geo.vertexAttribs() ] )
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[i][0] )
self.assertAlmostEqual( uvValues[1], uvData[i][1] )
i += 1
converter = IECoreHoudini.FromHoudiniPolygonsConverter( sop )
result = converter.convert()
self.assertEqual( result["uv"].data.getInterpretation(), IECore.GeometricData.Interpretation.UV )
# we cannot guarantee to generate the same data when extracting from Houdini
# because we always generate indices, but we can generate correctly indexed data
self.assertEqual( result["uv"].data.size(), 4 )
self.assertEqual( result["uv"].indices.size(), 6 )
for i in range( 0, mesh.variableSize( mesh["uv"].interpolation ) ) :
self.assertEqual( mesh["uv"].data[i], result["uv"].data[ result["uv"].indices[i] ] )
def testIndexedUVRoundTrip( self ) :
mesh = IECore.Reader.create( "test/IECore/data/cobFiles/twoTrianglesWithSharedUVs.cob" ).read()
uvData = mesh["uv"].data
uvIndices = mesh["uv"].indices
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
self.assertTrue( "uv" in [ x.name() for x in geo.vertexAttribs() ] )
uvs = geo.findVertexAttrib( "uv" )
i = 0
for prim in geo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( uvs )
self.assertAlmostEqual( uvValues[0], uvData[uvIndices[i]][0] )
self.assertAlmostEqual( uvValues[1], uvData[uvIndices[i]][1] )
i += 1
converter = IECoreHoudini.FromHoudiniPolygonsConverter( sop )
result = converter.convert()
self.assertEqual( result["uv"].data.getInterpretation(), IECore.GeometricData.Interpretation.UV )
# we cannot guarantee to generate the same indices when extracting from Houdini
# nor the same data, but we can generate correctly indexed data
self.assertEqual( result["uv"].data.size(), 4 )
self.assertEqual( result["uv"].indices.size(), 6 )
for i in range( 0, mesh.variableSize( mesh["uv"].interpolation ) ) :
self.assertEqual( mesh["uv"].data[ mesh["uv"].indices[i] ], result["uv"].data[ result["uv"].indices[i] ] )
def testCornersAndCreases( self ) :
mesh = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) )
# normals and UVs complicate the testing, and we don't need them to verify corners and creases
del mesh["N"]
del mesh["uv"]
cornerIds = [ 5 ]
cornerSharpnesses = [ 10.0 ]
mesh.setCorners( IECore.IntVectorData( cornerIds ), IECore.FloatVectorData( cornerSharpnesses ) )
creaseLengths = [ 3, 2 ]
creaseIds = [ 1, 2, 3, 4, 5 ] # note that these are vertex ids
creaseSharpnesses = [ 1, 5 ]
mesh.setCreases( IECore.IntVectorData( creaseLengths ), IECore.IntVectorData( creaseIds ), IECore.FloatVectorData( creaseSharpnesses ) )
sop = self.emptySop()
self.assertTrue( IECoreHoudini.ToHoudiniPolygonsConverter( mesh ).convert( sop ) )
geo = sop.geometry()
self.assertTrue( "cornerweight" in [ x.name() for x in geo.pointAttribs() ] )
self.assertTrue( "creaseweight" in [ x.name() for x in geo.vertexAttribs() ] )
# test corners
cornerWeight = geo.findPointAttrib( "cornerweight" )
for point in geo.points() :
sharpness = 0.0
if point.number() in cornerIds :
sharpness = cornerSharpnesses[ cornerIds.index( point.number() ) ]
self.assertEqual( point.attribValue( cornerWeight ), sharpness )
# test creases
expectedSharpnesses = [ 0 ] * 24
# edge 1-2
expectedSharpnesses[1] = 1
expectedSharpnesses[2] = 1
# edge 2-3
expectedSharpnesses[6] = 1
expectedSharpnesses[18] = 1
# edge 4-5
expectedSharpnesses[4] = 5
expectedSharpnesses[10] = 5
self.assertEqual( list(geo.vertexFloatAttribValues( "creaseweight" )), expectedSharpnesses )
# make sure it round trips well enough
result = IECoreHoudini.FromHoudiniPolygonsConverter( sop ).convert()
self.assertEqual( result.cornerIds(), mesh.cornerIds() )
self.assertEqual( result.cornerSharpnesses(), mesh.cornerSharpnesses() )
self.assertEqual( result.creaseLengths(), IECore.IntVectorData( [ 2, 2, 2 ] ) )
self.assertEqual( result.creaseIds(), IECore.IntVectorData( [ 2, 3, 1, 2, 4, 5 ] ) )
self.assertEqual( result.creaseSharpnesses(), IECore.FloatVectorData( [ 1, 1, 5 ] ) )
# if we re-align result creases, everything else is an exact match
mesh.setCreases( result.creaseLengths(), result.creaseIds(), result.creaseSharpnesses() )
self.assertEqual( result, mesh )
def tearDown( self ) :
if os.path.isfile( TestToHoudiniPolygonsConverter.__testScene ) :
os.remove( TestToHoudiniPolygonsConverter.__testScene )
if __name__ == "__main__":
unittest.main()
| en | 0.645139 | ########################################################################## # # Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## # add all valid detail attrib types # add all valid point attrib types # add all valid primitive attrib types # add all valid vertex attrib types # detail # float # 1 element # point # float # 1 element # prim # float # 1 element # vertex # float # 1 element # detail # int # 3 elements # point # int # 3 elements # prim # int # 3 elements # vert # int # 3 elements # unnamed unless we set the parameter # blindData still works for backwards compatibility # name parameter takes preference over blindData # verify we can filter uvs # have to filter the source attrs # Pref shouldn't transform either # we cannot guarantee to generate the same data when extracting from Houdini # because we always generate indices, but we can generate correctly indexed data # we cannot guarantee to generate the same indices when extracting from Houdini # nor the same data, but we can generate correctly indexed data # normals and UVs complicate the testing, and we don't need them to verify corners and creases # note that these are vertex ids # test corners # test creases # edge 1-2 # edge 2-3 # edge 4-5 # make sure it round trips well enough # if we re-align result creases, everything else is an exact match | 0.774036 | 1 |
examples/bar_chart_examples.py | ahlusar1989/vincent | 1,052 | 6632994 | # -*- coding: utf-8 -*-
"""
Vincent Bar Chart Example
"""
#Build a Bar Chart from scratch
from vincent import *
import pandas as pd
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 43, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
data = [farm_1, farm_2, farm_3, farm_4]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
width=ValueRef(scale='x', band=True, offset=-1),
y2=ValueRef(scale='y', value=0))
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark = Mark(type='rect', from_=MarkRef(data='table'),
properties=MarkProperties(enter=enter_props,
update=update_props))
vis.marks.append(mark)
data = Data.from_pandas(df['apples'])
#Using a Vincent KeyedList here
vis.data['table'] = data
vis.axis_titles(x='Farms', y='Data')
vis.to_json('vega.json')
#Convenience methods
vis = Bar(df['apples'])
#Fruit
trans = df.T
vis = Bar(trans['Farm 1'])
#From dict
vis = Bar(farm_1)
#From dict of iterables
vis = Bar({'x': ['apples', 'berries', 'squash', 'melons', 'corn'],
'y': [10, 32, 21, 13, 18]}, iter_idx='x')
#Finally, a boring bar chart from a list
vis = Bar([10, 20, 30, 15, 35, 10, 20])
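#To actually emit one of the convenience charts above (the axis titles and output
#path here are illustrative, not part of the original example):
#vis.axis_titles(x='Fruit', y='Count')
#vis.to_json('bar_convenience.json')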
| # -*- coding: utf-8 -*-
"""
Vincent Bar Chart Example
"""
#Build a Bar Chart from scratch
from vincent import *
import pandas as pd
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 43, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
data = [farm_1, farm_2, farm_3, farm_4]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
width=ValueRef(scale='x', band=True, offset=-1),
y2=ValueRef(scale='y', value=0))
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark = Mark(type='rect', from_=MarkRef(data='table'),
properties=MarkProperties(enter=enter_props,
update=update_props))
vis.marks.append(mark)
data = Data.from_pandas(df['apples'])
#Using a Vincent KeyedList here
vis.data['table'] = data
vis.axis_titles(x='Farms', y='Data')
vis.to_json('vega.json')
#Convenience methods
vis = Bar(df['apples'])
#Fruit
trans = df.T
vis = Bar(trans['Farm 1'])
#From dict
vis = Bar(farm_1)
#From dict of iterables
vis = Bar({'x': ['apples', 'berries', 'squash', 'melons', 'corn'],
'y': [10, 32, 21, 13, 18]}, iter_idx='x')
#Finally, a boring bar chart from a list
vis = Bar([10, 20, 30, 15, 35, 10, 20])
| en | 0.728599 | # -*- coding: utf-8 -*- Vincent Bar Chart Example #Build a Bar Chart from scratch #Marks #Using a Vincent KeyedList here #Convenience methods #Fruit #From dict #From dict of iterables #Finally, a boring bar chart from a list | 3.407511 | 3 |
src/charma/persons/actors/handler.py | mononobi/charma-server | 1 | 6632995 | # -*- coding: utf-8 -*-
"""
actors handler module.
"""
import charma.persons.actors.services as actor_services
from charma.persons.decorators import person_handler
from charma.persons.enumerations import PersonTypeEnum
from charma.persons.handler import AbstractPersonHandler
@person_handler()
class ActorHandler(AbstractPersonHandler):
"""
actor handler class.
"""
name = PersonTypeEnum.ACTOR
def create(self, id, **options):
"""
creates an actor with given inputs.
:param uuid.UUID id: person id.
"""
actor_services.create(id, **options)
| # -*- coding: utf-8 -*-
"""
actors handler module.
"""
import charma.persons.actors.services as actor_services
from charma.persons.decorators import person_handler
from charma.persons.enumerations import PersonTypeEnum
from charma.persons.handler import AbstractPersonHandler
@person_handler()
class ActorHandler(AbstractPersonHandler):
"""
actor handler class.
"""
name = PersonTypeEnum.ACTOR
def create(self, id, **options):
"""
creates an actor with given inputs.
:param uuid.UUID id: person id.
"""
actor_services.create(id, **options)
| en | 0.656762 | # -*- coding: utf-8 -*- actors handler module. actor handler class. creates an actor with given inputs. :param uuid.UUID id: person id. | 2.757755 | 3 |
holecmm.py | joelmeyerson/hole-cmm | 0 | 6632996 | #------------------------------------------------- holecmm.py --------------------------------------------
#
# Python script to convert the output of HOLE to a CMM file that can be visualized in Chimera or ChimeraX.
#
# Usage:
# python holecmm.py
#
# Show inputs:
# python holecmm.py -h
#
# Parameters:
# -i <input file> (required)
# -o <output file name> (optional, defaults to input file base name with cmm file extension)
# -r <value for sphere radius> (optional, defaults to 0.2)
# -c1 <Hex color value for pore radius < 1.15 Ang> (optional, defaults to red FF0000)
# -c2 <Hex color value for 1.15 Ang > pore radius < 2.30 Ang> (optional, defaults to green 00FF00)
# -c3 <Hex color value for pore radius > 2.30 Ang> (optional, defaults to blue 0000FF)
#
# Examples:
# python holecmm.py -i dotsurface-kcsa.vmd_plot
# python holecmm.py -i dotsurface-kcsa.vmd_plot -o kcsa.cmm -r 0.2 -c1 FF6347 -c2 90EE90 -c3 6495ED
# python holecmm.py -i hole-surface-dots.dat
#
#---------------------------------------------------------------------------------------------------------
import sys, os, re, argparse
# create argument parser
parser = argparse.ArgumentParser(description='Convert HOLE output to a CMM file.')
parser.add_argument('-i', metavar='input', type=str, help='Input file.', required=True)
parser.add_argument('-o', metavar='output', type=str, default='NA', help='Output file.')
parser.add_argument('-r', metavar='radius', type=float, default='0.2', help='Radius for markers.')
parser.add_argument('-c1', metavar='color', type=str, default='NA', help='Hex color for pore radius less than 1.15 Ang.')
parser.add_argument('-c2', metavar='color', type=str, default='NA', help='Hex color for pore radius between 1.15 Ang and 2.30 Ang.')
parser.add_argument('-c3', metavar='color', type=str, default='NA', help='Hex color for pore radius greater than 2.30 Ang.')
# parse args
args = parser.parse_args()
# parse input
ipath = args.i # input and path
iname = os.path.basename(args.i) # input file name
ibasename = os.path.splitext(iname)[0] # input base name
iext = os.path.splitext(iname)[-1] # input extension
# check input file exists
if os.path.isfile(ipath):
pass
else:
print("Input file not found.")
exit()
# parse output (default: input base name with a .cmm extension, as documented above)
if args.o == 'NA':
    oname = ibasename + '.cmm'
else:
    oname = args.o
# parse colors
if args.c1 == 'NA': c1 = 'FF0000'
else: c1 = args.c1
if args.c2 == 'NA': c2 = '00FF00'
else: c2 = args.c2
if args.c3 == 'NA': c3 = '0000FF'
else: c3 = args.c3
if len(c1) == 6 and len(c2) == 6 and len(c3) == 6:
if c1[1:].isalnum() and c2[1:].isalnum() and c3[1:].isalnum():
pass
else:
print("Colors must be in hex format.")
exit()
else:
print("Colors must be in hex format.")
exit()
# convert from HEX to RGB
c1R, c1G, c1B = int(c1[0:2], 16)/255.0, int(c1[2:4], 16)/255.0, int(c1[4:6], 16)/255.0
c2R, c2G, c2B = int(c2[0:2], 16)/255.0, int(c2[2:4], 16)/255.0, int(c2[4:6], 16)/255.0
c3R, c3G, c3B = int(c3[0:2], 16)/255.0, int(c3[2:4], 16)/255.0, int(c3[4:6], 16)/255.0
# read input file
ifile = open(ipath, 'r')
# create output file and add header
ofile = open(oname, 'w+')
ofile.write('<marker_set name="marker set 1">\n')
# marker ID counter
id = 0
if iext == '.vmd_plot': # process .vmd_plot file from HOLE
for line in ifile.readlines():
# iterate ID counter
id += 1
if line.startswith('draw point'):
# extract x, y, z coordinates from lines in .vmd_plot file
[x, y, z] = re.findall('\d+\.\d+', line)
x = float(x)
y = float(y)
z = float(z)
# write line to CMM file
ofile.write("<marker id=\"%d\" x=\"%5.2f\" y=\"%5.2f\" z=\"%5.2f\" %s radius=\"%2.1f\"/>\n" % (id, x, y, z, color, args.r))
elif line.startswith('draw color yellow'):
pass
elif line.startswith('draw color red'):
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c1R, c1G, c1B)
elif line.startswith('draw color green'):
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c2R, c2G, c2B)
elif line.startswith('draw color blue'):
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c3R, c3G, c3B)
else:
pass
elif iext == '.dat': # process .dat file from HOLE in COOT
# true if any colors specified
customcolor = args.c1 != 'NA' or args.c2 != 'NA' or args.c3 != 'NA'
if customcolor == False:
for line in ifile.readlines():
# iterate ID counter
id += 1
# extract x, y, z coordinates from lines in .dat file
x = float(line.split()[0])
y = float(line.split()[1])
z = float(line.split()[2])
r = float(line.split()[3])
g = float(line.split()[4])
b = float(line.split()[5])
# set color
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (r, g, b)
# write line to CMM file
ofile.write("<marker id=\"%d\" x=\"%5.2f\" y=\"%5.2f\" z=\"%5.2f\" %s radius=\"%2.1f\"/>\n" % (id, x, y, z, color, args.r))
else: # use custom colors
for line in ifile.readlines():
# iterate ID counter
id += 1
# extract x, y, z coordinates from lines in .dat file
x = float(line.split()[0])
y = float(line.split()[1])
z = float(line.split()[2])
r = float(line.split()[3])
g = float(line.split()[4])
b = float(line.split()[5])
if r >= g and r >= b:
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c1R, c1G, c1B)
elif g >= r and g >= b:
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c2R, c2G, c2B)
else:
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c3R, c3G, c3B)
# write line to CMM file
ofile.write("<marker id=\"%d\" x=\"%5.2f\" y=\"%5.2f\" z=\"%5.2f\" %s radius=\"%2.1f\"/>\n" % (id, x, y, z, color, args.r))
else:
print("Input file %s does not have .vmd_plot or .dat file extension." % iname)
exit()
# close input file
ifile.close()
# write footer and close output file
ofile.write('</marker_set>')
ofile.close()
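# Viewing the result (a sketch, not part of the original script): the generated .cmm
# file is a Chimera marker set. Open it via File > Open in Chimera, or with the
# "open" command in ChimeraX, e.g.:  open kcsa.cmm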
| #------------------------------------------------- holecmm.py --------------------------------------------
#
# Python script to convert the output of HOLE to a CMM file that can be visualized in Chimera or ChimeraX.
#
# Usage:
# python holecmm.py
#
# Show inputs:
# python holecmm.py -h
#
# Parameters:
# -i <input file> (required)
# -o <output file name> (optional, defaults to input file base name with cmm file extension)
# -r <value for sphere radius> (optional, defaults to 0.2)
# -c1 <Hex color value for pore radius < 1.15 Ang> (optional, defaults to red FF0000)
# -c2 <Hex color value for 1.15 Ang > pore radius < 2.30 Ang> (optional, defaults to green 00FF00)
# -c3 <Hex color value for pore radius > 2.30 Ang> (optional, defaults to blue 0000FF)
#
# Examples:
# python holecmm.py -i dotsurface-kcsa.vmd_plot
# python holecmm.py -i dotsurface-kcsa.vmd_plot -o kcsa.cmm -r 0.2 -c1 FF6347 -c2 90EE90 -c3 6495ED
# python holecmm.py -i hole-surface-dots.dat
#
#---------------------------------------------------------------------------------------------------------
import sys, os, re, argparse
# create argument parser
parser = argparse.ArgumentParser(description='Convert HOLE output to a CMM file.')
parser.add_argument('-i', metavar='input', type=str, help='Input file.', required=True)
parser.add_argument('-o', metavar='output', type=str, default='NA', help='Output file.')
parser.add_argument('-r', metavar='radius', type=float, default='0.2', help='Radius for markers.')
parser.add_argument('-c1', metavar='color', type=str, default='NA', help='Hex color for pore radius less than 1.15 Ang.')
parser.add_argument('-c2', metavar='color', type=str, default='NA', help='Hex color for pore radius between 1.15 Ang and 2.30 Ang.')
parser.add_argument('-c3', metavar='color', type=str, default='NA', help='Hex color for pore radius greater than 2.30 Ang.')
# parse args
args = parser.parse_args()
# parse input
ipath = args.i # input and path
iname = os.path.basename(args.i) # input file name
ibasename = os.path.splitext(iname)[0] # input base name
iext = os.path.splitext(iname)[-1] # input extension
# check input file exists
if os.path.isfile(ipath):
pass
else:
print("Input file not found.")
exit()
# parse output (default: input base name with a .cmm extension, as documented above)
if args.o == 'NA':
    oname = ibasename + '.cmm'
else:
    oname = args.o
# parse colors
if args.c1 == 'NA': c1 = 'FF0000'
else: c1 = args.c1
if args.c2 == 'NA': c2 = '00FF00'
else: c2 = args.c2
if args.c3 == 'NA': c3 = '0000FF'
else: c3 = args.c3
if len(c1) == 6 and len(c2) == 6 and len(c3) == 6:
if c1[1:].isalnum() and c2[1:].isalnum() and c3[1:].isalnum():
pass
else:
print("Colors must be in hex format.")
exit()
else:
print("Colors must be in hex format.")
exit()
# convert from HEX to RGB
c1R, c1G, c1B = int(c1[0:2], 16)/255.0, int(c1[2:4], 16)/255.0, int(c1[4:6], 16)/255.0
c2R, c2G, c2B = int(c2[0:2], 16)/255.0, int(c2[2:4], 16)/255.0, int(c2[4:6], 16)/255.0
c3R, c3G, c3B = int(c3[0:2], 16)/255.0, int(c3[2:4], 16)/255.0, int(c3[4:6], 16)/255.0
# read input file
ifile = open(ipath, 'r')
# create output file and add header
ofile = open(oname, 'w+')
ofile.write('<marker_set name="marker set 1">\n')
# marker ID counter
id = 0
if iext == '.vmd_plot': # process .vmd_plot file from HOLE
for line in ifile.readlines():
# iterate ID counter
id += 1
if line.startswith('draw point'):
# extract x, y, z coordinates from lines in .vmd_plot file
[x, y, z] = re.findall('\d+\.\d+', line)
x = float(x)
y = float(y)
z = float(z)
# write line to CMM file
ofile.write("<marker id=\"%d\" x=\"%5.2f\" y=\"%5.2f\" z=\"%5.2f\" %s radius=\"%2.1f\"/>\n" % (id, x, y, z, color, args.r))
elif line.startswith('draw color yellow'):
pass
elif line.startswith('draw color red'):
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c1R, c1G, c1B)
elif line.startswith('draw color green'):
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c2R, c2G, c2B)
elif line.startswith('draw color blue'):
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c3R, c3G, c3B)
else:
pass
elif iext == '.dat': # process .dat file from HOLE in COOT
# true if any colors specified
customcolor = args.c1 != 'NA' or args.c2 != 'NA' or args.c3 != 'NA'
if customcolor == False:
for line in ifile.readlines():
# iterate ID counter
id += 1
# extract x, y, z coordinates from lines in .dat file
x = float(line.split()[0])
y = float(line.split()[1])
z = float(line.split()[2])
r = float(line.split()[3])
g = float(line.split()[4])
b = float(line.split()[5])
# set color
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (r, g, b)
# write line to CMM file
ofile.write("<marker id=\"%d\" x=\"%5.2f\" y=\"%5.2f\" z=\"%5.2f\" %s radius=\"%2.1f\"/>\n" % (id, x, y, z, color, args.r))
else: # use custom colors
for line in ifile.readlines():
# iterate ID counter
id += 1
# extract x, y, z coordinates from lines in .dat file
x = float(line.split()[0])
y = float(line.split()[1])
z = float(line.split()[2])
r = float(line.split()[3])
g = float(line.split()[4])
b = float(line.split()[5])
if r >= g and r >= b:
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c1R, c1G, c1B)
elif g >= r and g >= b:
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c2R, c2G, c2B)
else:
color = "r=\"%3.2f\" g=\"%3.2f\" b=\"%3.2f\"" % (c3R, c3G, c3B)
# write line to CMM file
ofile.write("<marker id=\"%d\" x=\"%5.2f\" y=\"%5.2f\" z=\"%5.2f\" %s radius=\"%2.1f\"/>\n" % (id, x, y, z, color, args.r))
else:
print("Input file %s does not have .vmd_plot or .dat file extension." % iname)
exit()
# close input file
ifile.close()
# write footer and close output file
ofile.write('</marker_set>')
ofile.close()
| en | 0.42817 | #------------------------------------------------- holecmm.py -------------------------------------------- # # Python script to convert the output of HOLE to a CMM file that can be visualized in Chimera or ChimeraX. # # Usage: # python holecmm.py # # Show inputs: # python holecmm.py -h # # Parameters: # -i <input file> (required) # -o <output file name> (optional, defaults to input file base name with cmm file extension) # -r <value for sphere radius> (optional, defaults to 0.2) # -c1 <Hex color value for pore radius < 1.15 Ang> (optional, defaults to red FF0000) # -c2 <Hex color value for 1.15 Ang > pore radius < 2.30 Ang> (optional, defaults to green 00FF00) # -c3 <Hex color value for pore radius > 2.30 Ang> (optional, defaults to blue 0000FF) # # Examples: # python holecmm.py -i dotsurface-kcsa.vmd_plot # python holecmm.py -i dotsurface-kcsa.vmd_plot -o kcsa.cmm -r 0.2 -c1 FF6347 -c2 90EE90 -c3 6495ED # python holecmm.py -i hole-surface-dots.dat # #--------------------------------------------------------------------------------------------------------- # create argument parser # parse args # parse input # input and path # input file name # input base name # input extension # check input file exists # parse output # parse colors # convert from HEX to RGB # read input file # create output file and add header # marker ID counter # process .vmd_plot file from HOLE # iterate ID counter # extract x, y, z coordinates from lines in .vmd_plot file # write line to CMM file # process .dat file from HOLE in COOT # true if any colors specified # iterate ID counter # extract x, y, z coordinates from lines in .dat file # set color # write line to CMM file # use custom colors # iterate ID counter # extract x, y, z coordinates from lines in .dat file # write line to CMM file # close input file # write footer and close output file | 2.459626 | 2 |
SBS.py | hduliufan/work | 0 | 6632997 | #序列反向选择算法sbs
from sklearn.base import clone
#itertools provides the subset (combination) iterators
from itertools import combinations
from sklearn.metrics import accuracy_score
import numpy as np
from sklearn.cross_validation import train_test_split
class SBS(object):
'''
    estimator: the classifier (model) used to score each feature subset
'''
def __init__(self,estimator, k_feature, scoring=accuracy_score,
test_size= None ,random_state=None):
self.estimator= clone(estimator)
self.k_feature= k_feature
self.scoring= scoring
self.random_state = random_state
self.test_size= test_size
def fit(self,x,y):
x_train,x_test,y_train,y_test= train_test_split(x,y,test_size=self.test_size,
random_state=self.random_state)
dim=x_train.shape[1]
        #indices_: column indices kept as a tuple (immutable)
        #stored as instance-level state
self.indices_= tuple(range(dim))
        #history of evaluated feature subsets
self.subsets_=[self.indices_]
score= self._calc_score(x_train,y_train,x_test,y_test,self.indices_)
        #first entry of the score history
self.scores_= [score]
while dim > self.k_feature:
scores=[]
subsets= []
for p in combinations(self.indices_, r=dim-1):
score= self._calc_score(x_train,y_train,x_test,y_test,p)
scores.append(score)
                #store this candidate subset
subsets.append(p)
            #argmax returns the index of the highest score
            best= np.argmax(scores)
            #keep the best subset, i.e. the column indices with the highest score
self.indices_= subsets[best]
self.subsets_.append(self.indices_)
dim -=1
            #record the best score for this subset size
self.scores_.append(scores[best])
        #best score once the target feature count is reached
self.k_scores_=self.scores_[-1]
return self
    #return only the selected feature columns
def transform(self,x):
return x[:,self.indices_]
def _calc_score(self,x_train,y_train,x_test,y_test,indices):
self.estimator.fit(x_train[:,indices],y_train)
y_predict= self.estimator.predict(x_test[:,indices])
        #scoring defaults to accuracy_score
score= self.scoring(y_test,y_predict)
return score
def bestchoice(self):
best= np.argmax(self.scores_)
return self.subsets_[best]
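#Usage sketch (illustrative, not part of the original file; the estimator choice and
#the x/y arrays below are placeholders for your own data):
#from sklearn.neighbors import KNeighborsClassifier
#knn = KNeighborsClassifier(n_neighbors=2)
#sbs = SBS(knn, k_feature=1, test_size=0.25, random_state=1)
#sbs.fit(x, y) #x: feature matrix, y: class labels
#x_selected = sbs.transform(x) #keep only the selected feature columns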
| #Sequential Backward Selection (SBS) algorithm
from sklearn.base import clone
#itertools provides the subset (combination) iterators
from itertools import combinations
from sklearn.metrics import accuracy_score
import numpy as np
from sklearn.cross_validation import train_test_split
class SBS(object):
'''
    estimator: the classifier (model) used to score each feature subset
'''
def __init__(self,estimator, k_feature, scoring=accuracy_score,
test_size= None ,random_state=None):
self.estimator= clone(estimator)
self.k_feature= k_feature
self.scoring= scoring
self.random_state = random_state
self.test_size= test_size
def fit(self,x,y):
x_train,x_test,y_train,y_test= train_test_split(x,y,test_size=self.test_size,
random_state=self.random_state)
dim=x_train.shape[1]
        #indices_: column indices kept as a tuple (immutable)
        #stored as instance-level state
self.indices_= tuple(range(dim))
        #history of evaluated feature subsets
self.subsets_=[self.indices_]
score= self._calc_score(x_train,y_train,x_test,y_test,self.indices_)
        #first entry of the score history
self.scores_= [score]
while dim > self.k_feature:
scores=[]
subsets= []
for p in combinations(self.indices_, r=dim-1):
score= self._calc_score(x_train,y_train,x_test,y_test,p)
scores.append(score)
                #store this candidate subset
subsets.append(p)
            #argmax returns the index of the highest score
            best= np.argmax(scores)
            #keep the best subset, i.e. the column indices with the highest score
self.indices_= subsets[best]
self.subsets_.append(self.indices_)
dim -=1
            #record the best score for this subset size
self.scores_.append(scores[best])
        #best score once the target feature count is reached
self.k_scores_=self.scores_[-1]
return self
    #return only the selected feature columns
def transform(self,x):
return x[:,self.indices_]
def _calc_score(self,x_train,y_train,x_test,y_test,indices):
self.estimator.fit(x_train[:,indices],y_train)
y_predict= self.estimator.predict(x_test[:,indices])
        #scoring defaults to accuracy_score
score= self.scoring(y_test,y_predict)
return score
def bestchoice(self):
best= np.argmax(self.scores_)
return self.subsets_[best]
| zh | 0.971369 | #序列反向选择算法sbs #itertools迭代器产生 estimator 是采用的方法分类后的模型 #indices 目录 元组不能改变 #类内全局变量 #子集subset #数组的一个元素 #子集存储 #argmax返回值是最大值的indices #返回的是最优的子集即score最大的子集目录即列向量标号 #存储的是score最大的子集 #返回的是满足阈值的最佳score #返回最佳的特征列 #实际是调用accuracy_score 进行正确率 | 2.654305 | 3 |
rama/config.py | tadfisher/rama | 2 | 6632998 | import layout
import view
defaults = {
'layouts': [layout.TileLayout()],
'views': ['main']
}
| import layout
import view
defaults = {
'layouts': [layout.TileLayout()],
'views': ['main']
}
| none | 1 | 1.324789 | 1 |
|
scrapeops_python_logger/utils/error_handling.py | ScrapeOps/scrapeops-python-logger | 0 | 6632999 | <filename>scrapeops_python_logger/utils/error_handling.py
import functools
from scrapeops_python_logger.exceptions import ScrapeOpsAPIResponseError
def exception_handler(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ScrapeOpsAPIResponseError as e:
pass
except Exception as e:
pass
return wrapper
| <filename>scrapeops_python_logger/utils/error_handling.py
import functools
from scrapeops_python_logger.exceptions import ScrapeOpsAPIResponseError
def exception_handler(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ScrapeOpsAPIResponseError as e:
pass
except Exception as e:
pass
return wrapper
| none | 1 | 2.59916 | 3 |
|
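A short usage sketch of the exception_handler decorator above; the decorated function and its argument are made up for illustration.
# Hypothetical example: any exception raised inside the wrapped call is swallowed
# by the wrapper, which then implicitly returns None.
@exception_handler
def send_stats(payload):
    raise ValueError("something went wrong while logging")

result = send_stats({"job": "demo"})  # no exception propagates
print(result)                         # None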
gdsfactory/components/pack_doe.py | thomasdorch/gdsfactory | 0 | 6633000 | import itertools as it
from typing import Any, Dict, List
import gdsfactory as gf
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.grid import grid, grid_with_text
from gdsfactory.pack import pack
from gdsfactory.types import CellSpec, ComponentSpec, Optional
@cell
def pack_doe(
doe: ComponentSpec,
settings: Dict[str, List[Any]],
do_permutations: bool = False,
function: Optional[CellSpec] = None,
**kwargs,
) -> Component:
"""Packs a component DOE (Design of Experiment) using pack.
Args:
doe: function to return Components.
settings: component settings.
do_permutations: for each setting.
function: for the component (add padding, grating couplers ...)
keyword Args:
spacing: Minimum distance between adjacent shapes
aspect_ratio: (width, height) ratio of the rectangular bin
max_size: Limits the size into which the shapes will be packed
sort_by_area: Pre-sorts the shapes by area
density: Values closer to 1 pack tighter but require more computation
precision: Desired precision for rounding vertex coordinates.
text: Optional function to add text labels.
text_prefix: for labels. For example. 'A' will produce 'A1', 'A2', ...
text_offsets: relative to component size info anchor. Defaults to center.
text_anchors: relative to component (ce cw nc ne nw sc se sw center cc).
name_prefix: for each packed component (avoids the Unnamed cells warning).
Note that the suffix contains a uuid so the name will not be deterministic
rotation: for each component in degrees
h_mirror: horizontal mirror in y axis (x, 1) (1, 0). This is the most common.
v_mirror: vertical mirror using x axis (1, y) (0, y)
"""
if do_permutations:
settings_list = [dict(zip(settings, t)) for t in it.product(*settings.values())]
else:
settings_list = [dict(zip(settings, t)) for t in zip(*settings.values())]
if function:
function = gf.get_cell(function)
if not callable(function):
raise ValueError(f"Error {function!r} needs to be callable.")
component_list = [
function(gf.get_component(doe, **settings)) for settings in settings_list
]
else:
component_list = [
gf.get_component(doe, **settings) for settings in settings_list
]
c = pack(component_list=component_list, **kwargs)
if len(c) > 1:
raise ValueError(
f"failed to pack in one Component, it created {len(c)} Components"
)
else:
c = c[0]
c.doe_names = [component.name for component in component_list]
c.doe_settings = settings_list
return c
def pack_doe_grid(
doe: ComponentSpec,
settings: Dict[str, List[Any]],
do_permutations: bool = False,
function: Optional[CellSpec] = None,
with_text: bool = False,
**kwargs,
) -> Component:
"""Packs a component DOE (Design of Experiment) using grid.
Args:
component: function to return Components.
settings: component settings.
do_permutations: for each setting.
function: for the component (add padding, grating couplers ...)
with_text: includes text label.
keyword Args:
spacing: between adjacent elements on the grid, can be a tuple for
different distances in height and width.
        separation: If True, guarantees elements are separated with fixed spacing;
            if False, elements are spaced evenly along a grid.
        shape: x, y shape of the grid (see np.reshape).
            If no shape is given and the list is 1D, behaves as if np.reshape were run with (1, -1).
align_x: {'x', 'xmin', 'xmax'} for x (column) alignment along
align_y: {'y', 'ymin', 'ymax'} for y (row) alignment along
edge_x: {'x', 'xmin', 'xmax'} for x (column) (ignored if separation = True)
edge_y: {'y', 'ymin', 'ymax'} for y (row) (ignored if separation = True)
rotation: for each component in degrees.
h_mirror: horizontal mirror y axis (x, 1) (1, 0). most common mirror.
v_mirror: vertical mirror using x axis (1, y) (0, y).
"""
if do_permutations:
settings_list = [dict(zip(settings, t)) for t in it.product(*settings.values())]
else:
settings_list = [dict(zip(settings, t)) for t in zip(*settings.values())]
if function:
function = gf.get_cell(function)
if not callable(function):
raise ValueError(f"Error {function!r} needs to be callable.")
component_list = [
function(gf.get_component(doe, **settings)) for settings in settings_list
]
else:
component_list = [
gf.get_component(doe, **settings) for settings in settings_list
]
if with_text:
c = grid_with_text(component_list, **kwargs)
else:
c = grid(component_list, **kwargs)
c.doe_names = [component.name for component in component_list]
c.doe_settings = settings_list
return c
if __name__ == "__main__":
c = pack_doe_grid(
# doe=gf.c.mmi1x2,
doe="mmi1x2",
# doe=dict(component='mmi1x2', settings=dict(length_taper=50)),
settings=dict(length_mmi=[2.5, 100], width_mmi=[4, 10], hash_settings=[False]),
with_text=True,
spacing=(100, 100),
shape=(2, 2),
# settings=dict(length_mmi=[2, 100], width_mmi=[4, 10]),
do_permutations=True,
)
print(c.doe_names)
c.show()
# c = pack_doe(doe="mmi1x2", settings=dict(length_mmi=[2, 100], width_mmi=[4, 10]))
| import itertools as it
from typing import Any, Dict, List
import gdsfactory as gf
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.grid import grid, grid_with_text
from gdsfactory.pack import pack
from gdsfactory.types import CellSpec, ComponentSpec, Optional
@cell
def pack_doe(
doe: ComponentSpec,
settings: Dict[str, List[Any]],
do_permutations: bool = False,
function: Optional[CellSpec] = None,
**kwargs,
) -> Component:
"""Packs a component DOE (Design of Experiment) using pack.
Args:
doe: function to return Components.
settings: component settings.
do_permutations: for each setting.
function: for the component (add padding, grating couplers ...)
keyword Args:
spacing: Minimum distance between adjacent shapes
aspect_ratio: (width, height) ratio of the rectangular bin
max_size: Limits the size into which the shapes will be packed
sort_by_area: Pre-sorts the shapes by area
density: Values closer to 1 pack tighter but require more computation
precision: Desired precision for rounding vertex coordinates.
text: Optional function to add text labels.
text_prefix: for labels. For example. 'A' will produce 'A1', 'A2', ...
text_offsets: relative to component size info anchor. Defaults to center.
text_anchors: relative to component (ce cw nc ne nw sc se sw center cc).
name_prefix: for each packed component (avoids the Unnamed cells warning).
Note that the suffix contains a uuid so the name will not be deterministic
rotation: for each component in degrees
h_mirror: horizontal mirror in y axis (x, 1) (1, 0). This is the most common.
v_mirror: vertical mirror using x axis (1, y) (0, y)
"""
if do_permutations:
settings_list = [dict(zip(settings, t)) for t in it.product(*settings.values())]
else:
settings_list = [dict(zip(settings, t)) for t in zip(*settings.values())]
if function:
function = gf.get_cell(function)
if not callable(function):
raise ValueError(f"Error {function!r} needs to be callable.")
component_list = [
function(gf.get_component(doe, **settings)) for settings in settings_list
]
else:
component_list = [
gf.get_component(doe, **settings) for settings in settings_list
]
c = pack(component_list=component_list, **kwargs)
if len(c) > 1:
raise ValueError(
f"failed to pack in one Component, it created {len(c)} Components"
)
else:
c = c[0]
c.doe_names = [component.name for component in component_list]
c.doe_settings = settings_list
return c
def pack_doe_grid(
doe: ComponentSpec,
settings: Dict[str, List[Any]],
do_permutations: bool = False,
function: Optional[CellSpec] = None,
with_text: bool = False,
**kwargs,
) -> Component:
"""Packs a component DOE (Design of Experiment) using grid.
Args:
component: function to return Components.
settings: component settings.
do_permutations: for each setting.
function: for the component (add padding, grating couplers ...)
with_text: includes text label.
keyword Args:
spacing: between adjacent elements on the grid, can be a tuple for
different distances in height and width.
        separation: If True, guarantees elements are separated with fixed spacing;
            if False, elements are spaced evenly along a grid.
        shape: x, y shape of the grid (see np.reshape).
            If no shape is given and the list is 1D, behaves as if np.reshape were run with (1, -1).
align_x: {'x', 'xmin', 'xmax'} for x (column) alignment along
align_y: {'y', 'ymin', 'ymax'} for y (row) alignment along
edge_x: {'x', 'xmin', 'xmax'} for x (column) (ignored if separation = True)
edge_y: {'y', 'ymin', 'ymax'} for y (row) (ignored if separation = True)
rotation: for each component in degrees.
h_mirror: horizontal mirror y axis (x, 1) (1, 0). most common mirror.
v_mirror: vertical mirror using x axis (1, y) (0, y).
"""
if do_permutations:
settings_list = [dict(zip(settings, t)) for t in it.product(*settings.values())]
else:
settings_list = [dict(zip(settings, t)) for t in zip(*settings.values())]
if function:
function = gf.get_cell(function)
if not callable(function):
raise ValueError(f"Error {function!r} needs to be callable.")
component_list = [
function(gf.get_component(doe, **settings)) for settings in settings_list
]
else:
component_list = [
gf.get_component(doe, **settings) for settings in settings_list
]
if with_text:
c = grid_with_text(component_list, **kwargs)
else:
c = grid(component_list, **kwargs)
c.doe_names = [component.name for component in component_list]
c.doe_settings = settings_list
return c
if __name__ == "__main__":
c = pack_doe_grid(
# doe=gf.c.mmi1x2,
doe="mmi1x2",
# doe=dict(component='mmi1x2', settings=dict(length_taper=50)),
settings=dict(length_mmi=[2.5, 100], width_mmi=[4, 10], hash_settings=[False]),
with_text=True,
spacing=(100, 100),
shape=(2, 2),
# settings=dict(length_mmi=[2, 100], width_mmi=[4, 10]),
do_permutations=True,
)
print(c.doe_names)
c.show()
# c = pack_doe(doe="mmi1x2", settings=dict(length_mmi=[2, 100], width_mmi=[4, 10]))
| en | 0.700334 | Packs a component DOE (Design of Experiment) using pack. Args: doe: function to return Components. settings: component settings. do_permutations: for each setting. function: for the component (add padding, grating couplers ...) keyword Args: spacing: Minimum distance between adjacent shapes aspect_ratio: (width, height) ratio of the rectangular bin max_size: Limits the size into which the shapes will be packed sort_by_area: Pre-sorts the shapes by area density: Values closer to 1 pack tighter but require more computation precision: Desired precision for rounding vertex coordinates. text: Optional function to add text labels. text_prefix: for labels. For example. 'A' will produce 'A1', 'A2', ... text_offsets: relative to component size info anchor. Defaults to center. text_anchors: relative to component (ce cw nc ne nw sc se sw center cc). name_prefix: for each packed component (avoids the Unnamed cells warning). Note that the suffix contains a uuid so the name will not be deterministic rotation: for each component in degrees h_mirror: horizontal mirror in y axis (x, 1) (1, 0). This is the most common. v_mirror: vertical mirror using x axis (1, y) (0, y) Packs a component DOE (Design of Experiment) using grid. Args: component: function to return Components. settings: component settings. do_permutations: for each setting. function: for the component (add padding, grating couplers ...) with_text: includes text label. keyword Args: spacing: between adjacent elements on the grid, can be a tuple for different distances in height and width. separation: If True, guarantees elements are speparated with fixed spacing if False, elements are spaced evenly along a grid. shape: x, y shape of the grid (see np.reshape). If no shape and the list is 1D, if np.reshape were run with (1, -1). align_x: {'x', 'xmin', 'xmax'} for x (column) alignment along align_y: {'y', 'ymin', 'ymax'} for y (row) alignment along edge_x: {'x', 'xmin', 'xmax'} for x (column) (ignored if separation = True) edge_y: {'y', 'ymin', 'ymax'} for y (row) (ignored if separation = True) rotation: for each component in degrees. h_mirror: horizontal mirror y axis (x, 1) (1, 0). most common mirror. v_mirror: vertical mirror using x axis (1, y) (0, y). # doe=gf.c.mmi1x2, # doe=dict(component='mmi1x2', settings=dict(length_taper=50)), # settings=dict(length_mmi=[2, 100], width_mmi=[4, 10]), # c = pack_doe(doe="mmi1x2", settings=dict(length_mmi=[2, 100], width_mmi=[4, 10])) | 2.23235 | 2 |
scripts/usefullFunctions.py | pete-usds/opal | 16 | 6633001 | <reponame>pete-usds/opal<gh_stars>10-100
from opal.settings import BASE_DIR
import logging
from rest_framework.renderers import JSONRenderer
import json
import os
def startLogging():
logging.basicConfig( # filename=logFile,
filemode='w',
format='%(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG
)
def addControlsToGroup(group_name,controls):
"""
    :param group_name: The name of a new group of controls. Could be a new baseline or a common set of controls such as those addressed by a particular component
    :param controls: a list object containing one or more system_control objects
"""
from ssp.models import element_property, system_control
p = element_property.objects.get_or_create(ns='control_group', name=group_name, value='true')
for item in controls:
item.properties.add(p[0])
item.save()
# These are some useful functions for cleaning up data after an import
def changeRoll(old_role,new_role):
from ssp.models import system_control, user_role
controls = system_control.objects.filter(responsibleRoles=user_role.objects.filter(title=old_role)[0].pk)
for item in controls:
item.responsibleRoles.add(user_role.objects.filter(title=new_role)[0].pk)
item.responsibleRoles.remove(user_role.objects.filter(title=old_role)[0].pk)
user_role.objects.filter(title=old_role)[0].delete()
def delUnusedRoles():
from ssp.models import user_role
r = user_role.objects.all()
for item in r:
if item.system_control_set.count() == 0:
print('deleting ' + item.title)
item.delete()
def listRolesWithControlCount():
from ssp.models import user_role
r = user_role.objects.all()
role_dictionary = {}
for role in r:
role_dictionary[role.title] = role.control_statement_set.count()
sort_roles = sorted(role_dictionary.items(), key=lambda x: x[1], reverse=True)
for i in sort_roles:
print(i[0], i[1])
def linkSystemControltoNISTControl(catalog):
from ssp.models.controls import system_control, nist_control
    logging.debug("Starting...")
for item, key in system_control.objects.all().values_list('nist_control', 'pk'):
control = system_control.objects.get(pk=key)
logging.debug('Opened control ' + control.title)
nist_control_id = control.short_name
logging.debug('Looking up ' + nist_control_id)
try:
control.nist_control = nist_control.objects.get(sort_id=nist_control_id,catalog=catalog)
control.save()
logging.debug('Found nist control, link established')
except nist_control.DoesNotExist:
logging.debug(nist_control_id + ' not found')
def createFixtures():
import os
from django.apps import apps
fixture_dir = os.path.join(BASE_DIR, 'ssp/fixtures/')
app_models = apps.get_app_config('ssp').get_models()
for model in app_models:
if len(model.objects.all()) > 0:
cmd = 'python manage.py dumpdata ssp.' + model.__name__ + ' --natural-foreign --natural-primary -o ' + fixture_dir + model.__name__ + '.json'
os.system(cmd)
def serializerJSON(data, SSP=False):
json_data = JSONRenderer().render(data)
json_object = json.loads(json_data)
json_str = json.dumps(json_object, indent=2)
return aliasOSCAL(json_str, SSP)
def aliasOSCAL(json_str, SSP=False):
json_str = json_str.replace('"short_name":', '"short-name":')
    json_str = json_str.replace('"telephone_numbers":', '"telephone-numbers":')
json_str = json_str.replace('"email_addresses":', '"email-addresses":')
json_str = json_str.replace('"lastModified":', '"last-modified":')
json_str = json_str.replace('"oscalVersion":', '"oscal-version":')
json_str = json_str.replace('"desc":', '"description":')
json_str = json_str.replace('Impact":', '-impact":')
json_str = json_str.replace('"system-status":', '"status":')
json_str = json_str.replace('"authorization_boundary_diagram":', '"authorization-boundary":')
json_str = json_str.replace('"network_architecture_diagram":', '"network-architecture":')
json_str = json_str.replace('"data_flow_diagram":', '"data-flow":')
json_str = json_str.replace('"leveraged_authorization":', '"leveraged-authorizations":')
json_str = json_str.replace('"system_users":', '"users":')
json_str = json_str.replace('"system_components":', '"components":')
json_str = json_str.replace('"system_inventory_items":', '"inventory-items":')
json_str = json_str.replace('"system_characteristics":', '"system-characteristics":')
json_str = json_str.replace('"date_authorized":', '"date-authorized":')
json_str = json_str.replace('"security_sensitivity_level":', '"security-sensitivity-level":')
json_str = json_str.replace('"system_information":', '"system-information":')
json_str = json_str.replace('"information_types":', '"information-types":')
json_str = json_str.replace('"security_impact_level":', '"security-impact-level":')
json_str = json_str.replace('"security_objective_confidentiality":', '"security-objective-confidentiality":')
json_str = json_str.replace('"security_objective_integrity":', '"security-objective-integrity":')
json_str = json_str.replace('"security_objective_availability":', '"security-objective-availability":')
json_str = json_str.replace('"system_status":', '"system-status":')
json_str = json_str.replace('"system_implementation":', '"system-implementation":')
json_str = json_str.replace('"component_type":', '"component-type":')
json_str = json_str.replace('"component_title":', '"component-title":')
json_str = json_str.replace('"component_description":', '"component-description":')
json_str = json_str.replace('"component_information_types":', '"component-information-types":')
json_str = json_str.replace('"component_status":', '"component-status":')
json_str = json_str.replace('"component_responsible_roles":', '"component-responsible-roles":')
json_str = json_str.replace('"control_implementation":', '"control-implementation":')
json_str = json_str.replace('"control_parameters":', '"parameter-settings":')
json_str = json_str.replace('"control_statements":', '"statements":')
json_str = json_str.replace('"system_name":', '"system-name":')
if SSP:
json_str = json_str.replace('"controls": [', '"implemented-requirements": [')
json_str = json_str.replace('"properties":', '"props":')
return json_str
def validate_file_extension(filename, extension):
ext = os.path.splitext(filename)[1] # [0] returns path+filename
#valid_extensions = ['.pdf', '.doc', '.docx', '.jpg', '.png', '.xlsx', '.xls']
if ext.lower() != extension:
return False
else:
return True
| from opal.settings import BASE_DIR
import logging
from rest_framework.renderers import JSONRenderer
import json
import os
def startLogging():
logging.basicConfig( # filename=logFile,
filemode='w',
format='%(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG
)
def addControlsToGroup(group_name,controls):
"""
    :param group_name: The name of a new group of controls. Could be a new baseline or a common set of controls such as those addressed by a particular component
    :param controls: a list object containing one or more system_control objects
"""
from ssp.models import element_property, system_control
p = element_property.objects.get_or_create(ns='control_group', name=group_name, value='true')
for item in controls:
item.properties.add(p[0])
item.save()
# These are some useful functions for cleaning up data after an import
def changeRoll(old_role,new_role):
from ssp.models import system_control, user_role
controls = system_control.objects.filter(responsibleRoles=user_role.objects.filter(title=old_role)[0].pk)
for item in controls:
item.responsibleRoles.add(user_role.objects.filter(title=new_role)[0].pk)
item.responsibleRoles.remove(user_role.objects.filter(title=old_role)[0].pk)
user_role.objects.filter(title=old_role)[0].delete()
def delUnusedRoles():
from ssp.models import user_role
r = user_role.objects.all()
for item in r:
if item.system_control_set.count() == 0:
print('deleting ' + item.title)
item.delete()
def listRolesWithControlCount():
from ssp.models import user_role
r = user_role.objects.all()
role_dictionary = {}
for role in r:
role_dictionary[role.title] = role.control_statement_set.count()
sort_roles = sorted(role_dictionary.items(), key=lambda x: x[1], reverse=True)
for i in sort_roles:
print(i[0], i[1])
def linkSystemControltoNISTControl(catalog):
from ssp.models.controls import system_control, nist_control
    logging.debug("Starting...")
for item, key in system_control.objects.all().values_list('nist_control', 'pk'):
control = system_control.objects.get(pk=key)
logging.debug('Opened control ' + control.title)
nist_control_id = control.short_name
logging.debug('Looking up ' + nist_control_id)
try:
control.nist_control = nist_control.objects.get(sort_id=nist_control_id,catalog=catalog)
control.save()
logging.debug('Found nist control, link established')
except nist_control.DoesNotExist:
logging.debug(nist_control_id + ' not found')
def createFixtures():
import os
from django.apps import apps
fixture_dir = os.path.join(BASE_DIR, 'ssp/fixtures/')
app_models = apps.get_app_config('ssp').get_models()
for model in app_models:
if len(model.objects.all()) > 0:
cmd = 'python manage.py dumpdata ssp.' + model.__name__ + ' --natural-foreign --natural-primary -o ' + fixture_dir + model.__name__ + '.json'
os.system(cmd)
def serializerJSON(data, SSP=False):
json_data = JSONRenderer().render(data)
json_object = json.loads(json_data)
json_str = json.dumps(json_object, indent=2)
return aliasOSCAL(json_str, SSP)
def aliasOSCAL(json_str, SSP=False):
json_str = json_str.replace('"short_name":', '"short-name":')
    json_str = json_str.replace('"telephone_numbers":', '"telephone-numbers":')
json_str = json_str.replace('"email_addresses":', '"email-addresses":')
json_str = json_str.replace('"lastModified":', '"last-modified":')
json_str = json_str.replace('"oscalVersion":', '"oscal-version":')
json_str = json_str.replace('"desc":', '"description":')
json_str = json_str.replace('Impact":', '-impact":')
json_str = json_str.replace('"system-status":', '"status":')
json_str = json_str.replace('"authorization_boundary_diagram":', '"authorization-boundary":')
json_str = json_str.replace('"network_architecture_diagram":', '"network-architecture":')
json_str = json_str.replace('"data_flow_diagram":', '"data-flow":')
json_str = json_str.replace('"leveraged_authorization":', '"leveraged-authorizations":')
json_str = json_str.replace('"system_users":', '"users":')
json_str = json_str.replace('"system_components":', '"components":')
json_str = json_str.replace('"system_inventory_items":', '"inventory-items":')
json_str = json_str.replace('"system_characteristics":', '"system-characteristics":')
json_str = json_str.replace('"date_authorized":', '"date-authorized":')
json_str = json_str.replace('"security_sensitivity_level":', '"security-sensitivity-level":')
json_str = json_str.replace('"system_information":', '"system-information":')
json_str = json_str.replace('"information_types":', '"information-types":')
json_str = json_str.replace('"security_impact_level":', '"security-impact-level":')
json_str = json_str.replace('"security_objective_confidentiality":', '"security-objective-confidentiality":')
json_str = json_str.replace('"security_objective_integrity":', '"security-objective-integrity":')
json_str = json_str.replace('"security_objective_availability":', '"security-objective-availability":')
json_str = json_str.replace('"system_status":', '"system-status":')
json_str = json_str.replace('"system_implementation":', '"system-implementation":')
json_str = json_str.replace('"component_type":', '"component-type":')
json_str = json_str.replace('"component_title":', '"component-title":')
json_str = json_str.replace('"component_description":', '"component-description":')
json_str = json_str.replace('"component_information_types":', '"component-information-types":')
json_str = json_str.replace('"component_status":', '"component-status":')
json_str = json_str.replace('"component_responsible_roles":', '"component-responsible-roles":')
json_str = json_str.replace('"control_implementation":', '"control-implementation":')
json_str = json_str.replace('"control_parameters":', '"parameter-settings":')
json_str = json_str.replace('"control_statements":', '"statements":')
json_str = json_str.replace('"system_name":', '"system-name":')
if SSP:
json_str = json_str.replace('"controls": [', '"implemented-requirements": [')
json_str = json_str.replace('"properties":', '"props":')
return json_str
def validate_file_extension(filename, extension):
ext = os.path.splitext(filename)[1] # [0] returns path+filename
#valid_extensions = ['.pdf', '.doc', '.docx', '.jpg', '.png', '.xlsx', '.xls']
if ext.lower() != extension:
return False
else:
return True | en | 0.73372 | # filename=logFile, :param group_name: The name of a new group of controls. Cold be a new baseline or a common set of controls such as those addressed by a particular component :param controls: a list object contining one or more system_control objects # These are some useful functions for cleaning up data after an import # [0] returns path+filename #valid_extensions = ['.pdf', '.doc', '.docx', '.jpg', '.png', '.xlsx', '.xls'] | 2.336493 | 2 |
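A quick illustration of the validate_file_extension helper above; the file names are made up, and note that the extension is lower-cased before the comparison.
# Hypothetical calls:
validate_file_extension('report.pdf', '.pdf')    # True
validate_file_extension('report.PDF', '.pdf')    # True  (extension is lower-cased first)
validate_file_extension('report.docx', '.pdf')   # False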
config.example.py | Cyanoxygen/arcaea-mp | 3 | 6633002 | <filename>config.example.py
threshold = 200 # in second
| <filename>config.example.py
threshold = 200 # in second
| en | 0.976727 | # in second | 1.160527 | 1 |
amass/commands/remove/source/__init__.py | sayan-rc/amass | 0 | 6633003 | import amass
class Command(amass.commands.Command):
is_command = False
def __init__(self):
amass.commands.Command.__init__(self)
self.file = __file__ | import amass
class Command(amass.commands.Command):
is_command = False
def __init__(self):
amass.commands.Command.__init__(self)
self.file = __file__ | none | 1 | 2.213302 | 2 |
|
fintech/fda/views.py | fpark7/cs3240-s17-team31 | 0 | 6633004 | <gh_stars>0
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from django.db import models
from django.contrib.auth.models import Group, Permission
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import user_passes_test
from django.views.decorators.csrf import csrf_exempt
from .models import *
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.db import IntegrityError
from newsletter.models import *
import os
import json
@csrf_exempt
def fdaLogin(request):
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request=request, username=username, password=password)
if user is not None:
login(request, user) # actually does nothing
return JsonResponse({'verification': True})
else:
return JsonResponse({'verification': False})
@csrf_exempt
def getReportsList(request):
username = request.POST.get('username')
user = User.objects.get(username=username)
reports = Report.objects.all()
viewable_reports = []
group_names = []
for g in user.groups.all():
group_names.append(g.name)
if user.is_superuser:
for report in reports:
viewable_reports.append(report)
else:
for report in reports:
if report.is_private == 'N' or report.group in group_names or report.owner == user.username:
viewable_reports.append(report)
data = {}
reports_list = []
for report in viewable_reports:
# ADD INDUSTRY ONCE WE UPDATE THE MODEL AND FORMS
# I am also passing report.id to be smart
# content will be downloaded upon request in the client fda later
content_list = []
for file_obj in report.content.all():
content_list.append({'file_name': file_obj.file.name, 'file_status': file_obj.encrypted})
r_dict = {'owner': report.owner, 'group': report.group, 'timestamp': report.timestamp,
'is_private': report.is_private, 'company_name': report.company_name, 'company_phone': report.company_Phone,
'company_location': report.company_location, 'company_country': report.company_country,
'sector': report.sector, 'projects': report.projects, 'ceo_name': report.ceo_name,
'id': report.id, 'industry': report.industry, 'company_email': report.company_email, 'content': content_list}
reports_list.append(r_dict)
data['reports_list'] = reports_list
return JsonResponse({'reports_list': reports_list})
@csrf_exempt
def getFile(request):
pass | from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from django.db import models
from django.contrib.auth.models import Group, Permission
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import user_passes_test
from django.views.decorators.csrf import csrf_exempt
from .models import *
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.db import IntegrityError
from newsletter.models import *
import os
import json
@csrf_exempt
def fdaLogin(request):
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request=request, username=username, password=password)
if user is not None:
login(request, user) # actually does nothing
return JsonResponse({'verification': True})
else:
return JsonResponse({'verification': False})
@csrf_exempt
def getReportsList(request):
username = request.POST.get('username')
user = User.objects.get(username=username)
reports = Report.objects.all()
viewable_reports = []
group_names = []
for g in user.groups.all():
group_names.append(g.name)
if user.is_superuser:
for report in reports:
viewable_reports.append(report)
else:
for report in reports:
if report.is_private == 'N' or report.group in group_names or report.owner == user.username:
viewable_reports.append(report)
data = {}
reports_list = []
for report in viewable_reports:
# ADD INDUSTRY ONCE WE UPDATE THE MODEL AND FORMS
# I am also passing report.id to be smart
# content will be downloaded upon request in the client fda later
content_list = []
for file_obj in report.content.all():
content_list.append({'file_name': file_obj.file.name, 'file_status': file_obj.encrypted})
r_dict = {'owner': report.owner, 'group': report.group, 'timestamp': report.timestamp,
'is_private': report.is_private, 'company_name': report.company_name, 'company_phone': report.company_Phone,
'company_location': report.company_location, 'company_country': report.company_country,
'sector': report.sector, 'projects': report.projects, 'ceo_name': report.ceo_name,
'id': report.id, 'industry': report.industry, 'company_email': report.company_email, 'content': content_list}
reports_list.append(r_dict)
data['reports_list'] = reports_list
return JsonResponse({'reports_list': reports_list})
@csrf_exempt
def getFile(request):
pass | en | 0.877782 | # actually does nothing # ADD INDUSTRY ONCE WE UPDATE THE MODEL AND FORMS # I am also passing report.id to be smart # content will be downloaded upon request in the client fda later | 2.104918 | 2 |
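A minimal sketch of exercising the fdaLogin view above with Django's test client; the URL path is an assumption, and only the POST field names and the JSON response shape come from the view itself.
# Hypothetical client-side check:
from django.test import Client

client = Client()
resp = client.post('/fda/login/', {'username': 'alice', 'password': 'secret'})  # path is illustrative
print(resp.json())  # {'verification': True} on valid credentials, {'verification': False} otherwise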
NoiseAdder.py | neu-spiral/GraphMatching | 0 | 6633005 | <filename>NoiseAdder.py<gh_stars>0
import numpy as np
import pickle
import random
import argparse
if __name__=="__main__":
parser = argparse.ArgumentParser(description = 'Graph Preprocessor .',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('graph',help = 'File containing the graph')
    parser.add_argument('out',help = 'File to store the permuted graph')
parser.add_argument('size',type=int,help='Graph size')
parser.add_argument('--scale',type=float, default=0.001,help='The standard deviation of noise..')
snap_group = parser.add_mutually_exclusive_group(required=False)
snap_group.add_argument('--fromsnap', dest='fromsnap', action='store_true',help="Inputfiles are from SNAP")
snap_group.add_argument('--notfromsnap', dest='fromsnap', action='store_false',help="Inputfiles are pre-formatted")
parser.set_defaults(fromsnap=True)
parser.add_argument('--noise', choices=['normal', 'laplace', 'both'], help="Noise type")
parser.add_argument('--mix_noise_weight', type=float, default=0.5, help="The coeff. of normal distributed weights, only relevant if noise is set to 'both'.")
args = parser.parse_args()
weights = {}
#generate weights
for i in range(args.size):
for j in range(args.size):
if (j,i) in weights or (i,j) in weights:
continue
if args.noise == 'normal':
weights[(i,j)] = np.random.normal(loc=0.0,scale=args.scale)
elif args.noise == 'laplace':
weights[(i,j)] = np.random.laplace(loc=0.0,scale=args.scale)
elif args.noise == 'both':
weights[(i,j)] = args.mix_noise_weight * np.random.normal(loc=0.0,scale=args.scale) + (1-args.mix_noise_weight) * np.random.laplace(loc=0.0,scale=args.scale)
weights[(j,i)] = weights[(i,j)]
print (weights[(22, 55)], weights[(55, 22)])
out_file_name = args.out + '_weights_' + args.noise + str(args.scale)
if args.noise == 'both':
out_file_name += '_mixcoeff' + str(args.mix_noise_weight)
with open(out_file_name, 'wb') as fW:
pickle.dump(weights, fW)
| <filename>NoiseAdder.py<gh_stars>0
import numpy as np
import pickle
import random
import argparse
if __name__=="__main__":
parser = argparse.ArgumentParser(description = 'Graph Preprocessor .',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('graph',help = 'File containing the graph')
    parser.add_argument('out',help = 'File to store the permuted graph')
parser.add_argument('size',type=int,help='Graph size')
parser.add_argument('--scale',type=float, default=0.001,help='The standard deviation of noise..')
snap_group = parser.add_mutually_exclusive_group(required=False)
snap_group.add_argument('--fromsnap', dest='fromsnap', action='store_true',help="Inputfiles are from SNAP")
snap_group.add_argument('--notfromsnap', dest='fromsnap', action='store_false',help="Inputfiles are pre-formatted")
parser.set_defaults(fromsnap=True)
parser.add_argument('--noise', choices=['normal', 'laplace', 'both'], help="Noise type")
parser.add_argument('--mix_noise_weight', type=float, default=0.5, help="The coeff. of normal distributed weights, only relevant if noise is set to 'both'.")
args = parser.parse_args()
weights = {}
#generate weights
for i in range(args.size):
for j in range(args.size):
if (j,i) in weights or (i,j) in weights:
continue
if args.noise == 'normal':
weights[(i,j)] = np.random.normal(loc=0.0,scale=args.scale)
elif args.noise == 'laplace':
weights[(i,j)] = np.random.laplace(loc=0.0,scale=args.scale)
elif args.noise == 'both':
weights[(i,j)] = args.mix_noise_weight * np.random.normal(loc=0.0,scale=args.scale) + (1-args.mix_noise_weight) * np.random.laplace(loc=0.0,scale=args.scale)
weights[(j,i)] = weights[(i,j)]
print (weights[(22, 55)], weights[(55, 22)])
out_file_name = args.out + '_weights_' + args.noise + str(args.scale)
if args.noise == 'both':
out_file_name += '_mixcoeff' + str(args.mix_noise_weight)
with open(out_file_name, 'wb') as fW:
pickle.dump(weights, fW)
| en | 0.798045 | #generate weights | 2.726728 | 3 |
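A short sketch of reading back the pickled weights written by the script above; the file name is illustrative, since it is built from the out, noise and scale arguments.
# Hypothetical reader:
import pickle

with open('mygraph_weights_normal0.001', 'rb') as f:
    weights = pickle.load(f)

# weights is a dict keyed by (i, j) node-index pairs and symmetric by construction
assert weights[(3, 7)] == weights[(7, 3)]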
pypy/module/imp/importing.py | olliemath/pypy | 0 | 6633006 | """
Implementation of the interpreter-level default import logic.
"""
import sys, os, stat, re, platform
from pypy.interpreter.module import Module, init_extra_module_attrs
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, generic_new_descr
from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock
from pypy.interpreter.eval import Code
from pypy.interpreter.pycode import PyCode
from rpython.rlib import streamio, jit
from rpython.rlib.streamio import StreamErrors
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.rlib.signature import signature
from rpython.rlib import rposix_stat, types
from pypy.module.sys.version import PYPY_VERSION, CPYTHON_VERSION
from pypy.module.__pypy__.interp_os import _multiarch
_WIN32 = sys.platform == 'win32'
SO = '.pyd' if _WIN32 else '.so'
PYC_TAG = 'pypy%d%d' % CPYTHON_VERSION[:2]
DEFAULT_SOABI_BASE = '%s-pp%d%d' % ((PYC_TAG,) + PYPY_VERSION[:2])
# see also pypy_incremental_magic in interpreter/pycode.py for the magic
# version number stored inside pyc files.
@specialize.memo()
def get_so_extension(space):
if space.config.objspace.soabi is not None:
soabi = space.config.objspace.soabi
else:
soabi = DEFAULT_SOABI_BASE
if not soabi:
return SO
if not space.config.translating:
soabi += 'i'
platform_name = sys.platform
if platform_name.startswith('linux'):
platform_name = _multiarch
elif platform_name == 'win32' and sys.maxsize > 2**32:
platform_name = 'win_amd64'
else:
# darwin?
pass
soabi += '-' + platform_name
result = '.' + soabi + SO
assert result == result.lower() # this is an implicit requirement of importlib on Windows!
return result
def has_so_extension(space):
return (space.config.objspace.usemodules.cpyext or
space.config.objspace.usemodules._cffi_backend)
def check_sys_modules(space, w_modulename):
return space.finditem(space.sys.get('modules'), w_modulename)
def check_sys_modules_w(space, modulename):
return space.finditem_str(space.sys.get('modules'), modulename)
lib_pypy = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'lib_pypy')
def _readall(space, filename):
try:
fd = os.open(filename, os.O_RDONLY, 0400)
try:
result = []
while True:
data = os.read(fd, 8192)
if not data:
break
result.append(data)
finally:
os.close(fd)
except OSError as e:
raise wrap_oserror(space, e, filename)
return ''.join(result)
@unwrap_spec(modulename='fsencode', level=int)
def importhook(space, modulename, w_globals=None, w_locals=None, w_fromlist=None, level=0):
# A minimal version, that can only import builtin and lib_pypy modules!
# The actual __import__ is
# pypy.module._frozenimportlib.interp_import.import_with_frames_removed
assert w_locals is w_globals
assert level == 0
w_mod = check_sys_modules_w(space, modulename)
if w_mod:
return w_mod
lock = getimportlock(space)
try:
lock.acquire_lock()
if modulename in space.builtin_modules:
return space.getbuiltinmodule(modulename)
ec = space.getexecutioncontext()
source = _readall(space, os.path.join(lib_pypy, modulename + '.py'))
pathname = "<frozen %s>" % modulename
code_w = ec.compiler.compile(source, pathname, 'exec', 0)
w_mod = add_module(space, space.newtext(modulename))
assert isinstance(w_mod, Module) # XXX why is that necessary?
space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod)
space.setitem(w_mod.w_dict, space.newtext('__name__'), w_mod.w_name)
code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict)
assert check_sys_modules_w(space, modulename)
finally:
lock.release_lock(silent_after_fork=True)
return w_mod
class _WIN32Path(object):
def __init__(self, path):
self.path = path
def as_unicode(self):
return self.path
def _prepare_module(space, w_mod, filename, pkgdir):
space.sys.setmodule(w_mod)
space.setattr(w_mod, space.newtext('__file__'), space.newfilename(filename))
space.setattr(w_mod, space.newtext('__doc__'), space.w_None)
if pkgdir is not None:
space.setattr(w_mod, space.newtext('__path__'),
space.newlist([space.newtext(pkgdir)]))
init_extra_module_attrs(space, w_mod)
def add_module(space, w_name):
w_mod = check_sys_modules(space, w_name)
if w_mod is None:
w_mod = Module(space, w_name)
init_extra_module_attrs(space, w_mod)
space.sys.setmodule(w_mod)
return w_mod
# __________________________________________________________________
#
# import lock, to prevent two threads from running module-level code in
# parallel. This behavior is more or less part of the language specs,
# as an attempt to avoid failure of 'from x import y' if module x is
# still being executed in another thread.
# This logic is tested in pypy.module.thread.test.test_import_lock.
class ImportRLock:
def __init__(self, space):
self.space = space
self.lock = None
self.lockowner = None
self.lockcounter = 0
def lock_held_by_someone_else(self):
me = self.space.getexecutioncontext() # used as thread ident
return self.lockowner is not None and self.lockowner is not me
def lock_held_by_anyone(self):
return self.lockowner is not None
def acquire_lock(self):
# this function runs with the GIL acquired so there is no race
# condition in the creation of the lock
if self.lock is None:
try:
self.lock = self.space.allocate_lock()
except CannotHaveLock:
return
me = self.space.getexecutioncontext() # used as thread ident
if self.lockowner is me:
pass # already acquired by the current thread
else:
self.lock.acquire(True)
assert self.lockowner is None
assert self.lockcounter == 0
self.lockowner = me
self.lockcounter += 1
def release_lock(self, silent_after_fork):
me = self.space.getexecutioncontext() # used as thread ident
if self.lockowner is not me:
if self.lockowner is None and silent_after_fork:
# Too bad. This situation can occur if a fork() occurred
# with the import lock held, and we're the child.
return
if self.lock is None: # CannotHaveLock occurred
return
space = self.space
raise oefmt(space.w_RuntimeError, "not holding the import lock")
assert self.lockcounter > 0
self.lockcounter -= 1
if self.lockcounter == 0:
self.lockowner = None
self.lock.release()
def reinit_lock(self):
# Called after fork() to ensure that newly created child
# processes do not share locks with the parent
# (Note that this runs after interp_imp.acquire_lock()
# done in the "before" fork hook, so that's why we decrease
# the lockcounter here)
if self.lockcounter > 1:
# Forked as a side effect of import
self.lock = self.space.allocate_lock()
me = self.space.getexecutioncontext()
self.lock.acquire(True)
# XXX: can the previous line fail?
self.lockowner = me
self.lockcounter -= 1
else:
self.lock = None
self.lockowner = None
self.lockcounter = 0
def getimportlock(space):
return space.fromcache(ImportRLock)
# __________________________________________________________________
#
# .pyc file support
"""
Magic word to reject .pyc files generated by other Python versions.
It should change for each incompatible change to the bytecode.
The value of CR and LF is incorporated so if you ever read or write
a .pyc file in text mode the magic number will be wrong; also, the
Apple MPW compiler swaps their values, botching string constants.
CPython 2 uses values between 20121 - 62xxx
CPython 3 uses values greater than 3000
PyPy uses values under 3000
"""
# Depending on which opcodes are enabled, eg. CALL_METHOD we bump the version
# number by some constant
#
# CPython + 0 -- used by CPython without the -U option
# CPython + 1 -- used by CPython with the -U option
# CPython + 7 = default_magic -- used by PyPy (incompatible!)
#
from pypy.interpreter.pycode import default_magic
MARSHAL_VERSION_FOR_PYC = 4
def get_pyc_magic(space):
return default_magic
def parse_source_module(space, pathname, source):
""" Parse a source file and return the corresponding code object """
ec = space.getexecutioncontext()
pycode = ec.compiler.compile(source, pathname, 'exec', 0)
return pycode
def exec_code_module(space, w_mod, code_w, pathname, cpathname,
write_paths=True):
w_dict = space.getattr(w_mod, space.newtext('__dict__'))
space.call_method(w_dict, 'setdefault',
space.newtext('__builtins__'),
space.builtin)
if write_paths:
if pathname is not None:
w_pathname = get_sourcefile(space, pathname)
else:
w_pathname = code_w.w_filename
if cpathname is not None:
w_cpathname = space.newfilename(cpathname)
else:
w_cpathname = space.w_None
space.setitem(w_dict, space.newtext("__file__"), w_pathname)
space.setitem(w_dict, space.newtext("__cached__"), w_cpathname)
#
# like PyImport_ExecCodeModuleObject(), we invoke
# _bootstrap_external._fix_up_module() here, which should try to
# fix a few more attributes (also __file__ and __cached__, but
# let's keep the logic that also sets them explicitly above, just
# in case)
space.appexec([w_dict, w_pathname, w_cpathname],
"""(d, pathname, cpathname):
from importlib._bootstrap_external import _fix_up_module
name = d.get('__name__')
if name is not None:
_fix_up_module(d, name, pathname, cpathname)
""")
#
code_w.exec_code(space, w_dict, w_dict)
def rightmost_sep(filename):
"Like filename.rfind('/'), but also search for \\."
index = filename.rfind(os.sep)
if os.altsep is not None:
index2 = filename.rfind(os.altsep)
index = max(index, index2)
return index
@signature(types.str0(), returns=types.str0())
def make_compiled_pathname(pathname):
"Given the path to a .py file, return the path to its .pyc file."
# foo.py -> __pycache__/foo.<tag>.pyc
lastpos = rightmost_sep(pathname) + 1
assert lastpos >= 0 # zero when slash, takes the full name
fname = pathname[lastpos:]
if lastpos > 0:
# Windows: re-use the last separator character (/ or \\) when
# appending the __pycache__ path.
lastsep = pathname[lastpos-1]
else:
lastsep = os.sep
ext = fname
for i in range(len(fname)):
if fname[i] == '.':
ext = fname[:i + 1]
result = (pathname[:lastpos] + "__pycache__" + lastsep +
ext + PYC_TAG + '.pyc')
return result
@signature(types.str0(), returns=types.any())
def make_source_pathname(pathname):
"Given the path to a .pyc file, return the path to its .py file."
# (...)/__pycache__/foo.<tag>.pyc -> (...)/foo.py
right = rightmost_sep(pathname)
if right < 0:
return None
left = rightmost_sep(pathname[:right]) + 1
assert left >= 0
if pathname[left:right] != '__pycache__':
return None
# Now verify that the path component to the right of the last
# slash has two dots in it.
rightpart = pathname[right + 1:]
dot0 = rightpart.find('.') + 1
if dot0 <= 0:
return None
dot1 = rightpart[dot0:].find('.') + 1
if dot1 <= 0:
return None
# Too many dots?
if rightpart[dot0 + dot1:].find('.') >= 0:
return None
result = pathname[:left] + rightpart[:dot0] + 'py'
return result
def get_sourcefile(space, filename):
start = len(filename) - 4
stop = len(filename) - 1
if not 0 <= start <= stop or filename[start:stop].lower() != ".py":
return space.newfilename(filename)
py = make_source_pathname(filename)
if py is None:
py = filename[:-1]
try:
st = os.stat(py)
except OSError:
pass
else:
if stat.S_ISREG(st.st_mode):
return space.newfilename(py)
return space.newfilename(filename)
def update_code_filenames(space, code_w, pathname, oldname=None):
assert isinstance(code_w, PyCode)
if oldname is None:
oldname = code_w.co_filename
elif code_w.co_filename != oldname:
return
code_w.co_filename = pathname
code_w.w_filename = space.newfilename(pathname)
constants = code_w.co_consts_w
for const in constants:
if const is not None and isinstance(const, PyCode):
update_code_filenames(space, const, pathname, oldname)
def _get_long(s):
a = ord(s[0])
b = ord(s[1])
c = ord(s[2])
d = ord(s[3])
if d >= 0x80:
d -= 0x100
return a | (b<<8) | (c<<16) | (d<<24)
def read_compiled_module(space, cpathname, strbuf):
""" Read a code object from a file and check it for validity """
w_marshal = space.getbuiltinmodule('marshal')
w_code = space.call_method(w_marshal, 'loads', space.newbytes(strbuf))
if not isinstance(w_code, Code):
raise oefmt(space.w_ImportError, "Non-code object in %s", cpathname)
return w_code
@jit.dont_look_inside
def load_compiled_module(space, w_modulename, w_mod, cpathname, magic,
source, write_paths=True):
"""
Load a module from a compiled file, execute it, and return its
module object.
"""
if magic != get_pyc_magic(space):
raise oefmt(space.w_ImportError, "Bad magic number in %s", cpathname)
#print "loading pyc file:", cpathname
code_w = read_compiled_module(space, cpathname, source)
optimize = space.sys.get_optimize()
if optimize >= 2:
code_w.remove_docstrings(space)
exec_code_module(space, w_mod, code_w, cpathname, cpathname, write_paths)
return w_mod
class FastPathGiveUp(Exception):
pass
def _gcd_import(space, name):
# check sys.modules, if the module is already there and initialized, we can
# use it, otherwise fall back to importlib.__import__
# NB: we don't get the importing lock here, but CPython has the same fast
# path
w_modules = space.sys.get('modules')
w_module = space.finditem_str(w_modules, name)
if w_module is None:
raise FastPathGiveUp
# to check whether a module is initialized, we can ask for
# module.__spec__._initializing, which should be False
try:
w_spec = space.getattr(w_module, space.newtext("__spec__"))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
raise FastPathGiveUp
try:
w_initializing = space.getattr(w_spec, space.newtext("_initializing"))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# we have no mod.__spec__._initializing, so it's probably a builtin
# module which we can assume is initialized
else:
if space.is_true(w_initializing):
raise FastPathGiveUp
return w_module
def import_name_fast_path(space, w_modulename, w_globals, w_locals, w_fromlist,
w_level):
level = space.int_w(w_level)
if level == 0:
# fast path only for absolute imports without a "from" list, for now
# fromlist can be supported if we are importing from a module, not a
# package. to check that, look for the existence of __path__ attribute
# in w_mod
try:
name = space.text_w(w_modulename)
w_mod = _gcd_import(space, name)
have_fromlist = space.is_true(w_fromlist)
if not have_fromlist:
dotindex = name.find(".")
if dotindex < 0:
return w_mod
return _gcd_import(space, name[:dotindex])
except FastPathGiveUp:
pass
else:
assert have_fromlist
w_path = space.findattr(w_mod, space.newtext("__path__"))
if w_path is not None:
# hard case, a package! Call back into importlib
w_importlib = space.getbuiltinmodule('_frozen_importlib')
return space.call_method(w_importlib, "_handle_fromlist",
w_mod, w_fromlist,
space.w_default_importlib_import)
else:
return w_mod
return space.call_function(space.w_default_importlib_import, w_modulename, w_globals,
w_locals, w_fromlist, w_level)
def get_spec(space, w_module):
try:
return space.getattr(w_module, space.newtext('__spec__'))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return space.w_None
def is_spec_initializing(space, w_spec):
if space.is_none(w_spec):
return False
try:
w_initializing = space.getattr(w_spec, space.newtext("_initializing"))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return False
else:
return space.is_true(w_initializing)
def get_path(space, w_module):
default = space.newtext("unknown location")
try:
w_ret = space.getattr(w_module, space.newtext('__file__'))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return default
if w_ret is space.w_None:
return default
return w_ret
| """
Implementation of the interpreter-level default import logic.
"""
import sys, os, stat, re, platform
from pypy.interpreter.module import Module, init_extra_module_attrs
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, generic_new_descr
from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock
from pypy.interpreter.eval import Code
from pypy.interpreter.pycode import PyCode
from rpython.rlib import streamio, jit
from rpython.rlib.streamio import StreamErrors
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.rlib.signature import signature
from rpython.rlib import rposix_stat, types
from pypy.module.sys.version import PYPY_VERSION, CPYTHON_VERSION
from pypy.module.__pypy__.interp_os import _multiarch
_WIN32 = sys.platform == 'win32'
SO = '.pyd' if _WIN32 else '.so'
PYC_TAG = 'pypy%d%d' % CPYTHON_VERSION[:2]
DEFAULT_SOABI_BASE = '%s-pp%d%d' % ((PYC_TAG,) + PYPY_VERSION[:2])
# see also pypy_incremental_magic in interpreter/pycode.py for the magic
# version number stored inside pyc files.
@specialize.memo()
def get_so_extension(space):
if space.config.objspace.soabi is not None:
soabi = space.config.objspace.soabi
else:
soabi = DEFAULT_SOABI_BASE
if not soabi:
return SO
if not space.config.translating:
soabi += 'i'
platform_name = sys.platform
if platform_name.startswith('linux'):
platform_name = _multiarch
elif platform_name == 'win32' and sys.maxsize > 2**32:
platform_name = 'win_amd64'
else:
# darwin?
pass
soabi += '-' + platform_name
result = '.' + soabi + SO
assert result == result.lower() # this is an implicit requirement of importlib on Windows!
return result
def has_so_extension(space):
return (space.config.objspace.usemodules.cpyext or
space.config.objspace.usemodules._cffi_backend)
def check_sys_modules(space, w_modulename):
return space.finditem(space.sys.get('modules'), w_modulename)
def check_sys_modules_w(space, modulename):
return space.finditem_str(space.sys.get('modules'), modulename)
lib_pypy = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'lib_pypy')
def _readall(space, filename):
try:
fd = os.open(filename, os.O_RDONLY, 0400)
try:
result = []
while True:
data = os.read(fd, 8192)
if not data:
break
result.append(data)
finally:
os.close(fd)
except OSError as e:
raise wrap_oserror(space, e, filename)
return ''.join(result)
@unwrap_spec(modulename='fsencode', level=int)
def importhook(space, modulename, w_globals=None, w_locals=None, w_fromlist=None, level=0):
# A minimal version, that can only import builtin and lib_pypy modules!
# The actual __import__ is
# pypy.module._frozenimportlib.interp_import.import_with_frames_removed
assert w_locals is w_globals
assert level == 0
w_mod = check_sys_modules_w(space, modulename)
if w_mod:
return w_mod
lock = getimportlock(space)
try:
lock.acquire_lock()
if modulename in space.builtin_modules:
return space.getbuiltinmodule(modulename)
ec = space.getexecutioncontext()
source = _readall(space, os.path.join(lib_pypy, modulename + '.py'))
pathname = "<frozen %s>" % modulename
code_w = ec.compiler.compile(source, pathname, 'exec', 0)
w_mod = add_module(space, space.newtext(modulename))
assert isinstance(w_mod, Module) # XXX why is that necessary?
space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod)
space.setitem(w_mod.w_dict, space.newtext('__name__'), w_mod.w_name)
code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict)
assert check_sys_modules_w(space, modulename)
finally:
lock.release_lock(silent_after_fork=True)
return w_mod
class _WIN32Path(object):
def __init__(self, path):
self.path = path
def as_unicode(self):
return self.path
def _prepare_module(space, w_mod, filename, pkgdir):
space.sys.setmodule(w_mod)
space.setattr(w_mod, space.newtext('__file__'), space.newfilename(filename))
space.setattr(w_mod, space.newtext('__doc__'), space.w_None)
if pkgdir is not None:
space.setattr(w_mod, space.newtext('__path__'),
space.newlist([space.newtext(pkgdir)]))
init_extra_module_attrs(space, w_mod)
def add_module(space, w_name):
w_mod = check_sys_modules(space, w_name)
if w_mod is None:
w_mod = Module(space, w_name)
init_extra_module_attrs(space, w_mod)
space.sys.setmodule(w_mod)
return w_mod
# __________________________________________________________________
#
# import lock, to prevent two threads from running module-level code in
# parallel. This behavior is more or less part of the language specs,
# as an attempt to avoid failure of 'from x import y' if module x is
# still being executed in another thread.
# This logic is tested in pypy.module.thread.test.test_import_lock.
class ImportRLock:
def __init__(self, space):
self.space = space
self.lock = None
self.lockowner = None
self.lockcounter = 0
def lock_held_by_someone_else(self):
me = self.space.getexecutioncontext() # used as thread ident
return self.lockowner is not None and self.lockowner is not me
def lock_held_by_anyone(self):
return self.lockowner is not None
def acquire_lock(self):
# this function runs with the GIL acquired so there is no race
# condition in the creation of the lock
if self.lock is None:
try:
self.lock = self.space.allocate_lock()
except CannotHaveLock:
return
me = self.space.getexecutioncontext() # used as thread ident
if self.lockowner is me:
pass # already acquired by the current thread
else:
self.lock.acquire(True)
assert self.lockowner is None
assert self.lockcounter == 0
self.lockowner = me
self.lockcounter += 1
def release_lock(self, silent_after_fork):
me = self.space.getexecutioncontext() # used as thread ident
if self.lockowner is not me:
if self.lockowner is None and silent_after_fork:
# Too bad. This situation can occur if a fork() occurred
# with the import lock held, and we're the child.
return
if self.lock is None: # CannotHaveLock occurred
return
space = self.space
raise oefmt(space.w_RuntimeError, "not holding the import lock")
assert self.lockcounter > 0
self.lockcounter -= 1
if self.lockcounter == 0:
self.lockowner = None
self.lock.release()
def reinit_lock(self):
# Called after fork() to ensure that newly created child
# processes do not share locks with the parent
# (Note that this runs after interp_imp.acquire_lock()
# done in the "before" fork hook, so that's why we decrease
# the lockcounter here)
if self.lockcounter > 1:
# Forked as a side effect of import
self.lock = self.space.allocate_lock()
me = self.space.getexecutioncontext()
self.lock.acquire(True)
# XXX: can the previous line fail?
self.lockowner = me
self.lockcounter -= 1
else:
self.lock = None
self.lockowner = None
self.lockcounter = 0
def getimportlock(space):
return space.fromcache(ImportRLock)
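# Illustrative usage sketch (not part of the original module): callers bracket
# module creation and execution with the per-space import lock, mirroring
# importhook() above, so that two threads never run a module's top-level code
# at the same time.
#
#   lock = getimportlock(space)
#   try:
#       lock.acquire_lock()
#       ...  # find, compile and execute the module
#   finally:
#       lock.release_lock(silent_after_fork=True)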
# __________________________________________________________________
#
# .pyc file support
"""
Magic word to reject .pyc files generated by other Python versions.
It should change for each incompatible change to the bytecode.
The value of CR and LF is incorporated so if you ever read or write
a .pyc file in text mode the magic number will be wrong; also, the
Apple MPW compiler swaps their values, botching string constants.
CPython 2 uses values between 20121 - 62xxx
CPython 3 uses values greater than 3000
PyPy uses values under 3000
"""
# Depending on which opcodes are enabled, eg. CALL_METHOD we bump the version
# number by some constant
#
# CPython + 0 -- used by CPython without the -U option
# CPython + 1 -- used by CPython with the -U option
# CPython + 7 = default_magic -- used by PyPy (incompatible!)
#
from pypy.interpreter.pycode import default_magic
MARSHAL_VERSION_FOR_PYC = 4
def get_pyc_magic(space):
return default_magic
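# Note: default_magic is a single interpreter-wide constant, so every .pyc
# written by this PyPy carries the same magic word; load_compiled_module()
# below rejects any file whose stored magic differs ("Bad magic number").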
def parse_source_module(space, pathname, source):
""" Parse a source file and return the corresponding code object """
ec = space.getexecutioncontext()
pycode = ec.compiler.compile(source, pathname, 'exec', 0)
return pycode
def exec_code_module(space, w_mod, code_w, pathname, cpathname,
write_paths=True):
w_dict = space.getattr(w_mod, space.newtext('__dict__'))
space.call_method(w_dict, 'setdefault',
space.newtext('__builtins__'),
space.builtin)
if write_paths:
if pathname is not None:
w_pathname = get_sourcefile(space, pathname)
else:
w_pathname = code_w.w_filename
if cpathname is not None:
w_cpathname = space.newfilename(cpathname)
else:
w_cpathname = space.w_None
space.setitem(w_dict, space.newtext("__file__"), w_pathname)
space.setitem(w_dict, space.newtext("__cached__"), w_cpathname)
#
# like PyImport_ExecCodeModuleObject(), we invoke
# _bootstrap_external._fix_up_module() here, which should try to
# fix a few more attributes (also __file__ and __cached__, but
# let's keep the logic that also sets them explicitly above, just
# in case)
space.appexec([w_dict, w_pathname, w_cpathname],
"""(d, pathname, cpathname):
from importlib._bootstrap_external import _fix_up_module
name = d.get('__name__')
if name is not None:
_fix_up_module(d, name, pathname, cpathname)
""")
#
code_w.exec_code(space, w_dict, w_dict)
def rightmost_sep(filename):
"Like filename.rfind('/'), but also search for \\."
index = filename.rfind(os.sep)
if os.altsep is not None:
index2 = filename.rfind(os.altsep)
index = max(index, index2)
return index
@signature(types.str0(), returns=types.str0())
def make_compiled_pathname(pathname):
"Given the path to a .py file, return the path to its .pyc file."
# foo.py -> __pycache__/foo.<tag>.pyc
lastpos = rightmost_sep(pathname) + 1
assert lastpos >= 0 # zero when slash, takes the full name
fname = pathname[lastpos:]
if lastpos > 0:
# Windows: re-use the last separator character (/ or \\) when
# appending the __pycache__ path.
lastsep = pathname[lastpos-1]
else:
lastsep = os.sep
ext = fname
for i in range(len(fname)):
if fname[i] == '.':
ext = fname[:i + 1]
result = (pathname[:lastpos] + "__pycache__" + lastsep +
ext + PYC_TAG + '.pyc')
return result
@signature(types.str0(), returns=types.any())
def make_source_pathname(pathname):
"Given the path to a .pyc file, return the path to its .py file."
# (...)/__pycache__/foo.<tag>.pyc -> (...)/foo.py
right = rightmost_sep(pathname)
if right < 0:
return None
left = rightmost_sep(pathname[:right]) + 1
assert left >= 0
if pathname[left:right] != '__pycache__':
return None
# Now verify that the path component to the right of the last
# slash has two dots in it.
rightpart = pathname[right + 1:]
dot0 = rightpart.find('.') + 1
if dot0 <= 0:
return None
dot1 = rightpart[dot0:].find('.') + 1
if dot1 <= 0:
return None
# Too many dots?
if rightpart[dot0 + dot1:].find('.') >= 0:
return None
result = pathname[:left] + rightpart[:dot0] + 'py'
return result
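# Illustrative round trip (the real tag comes from PYC_TAG, defined elsewhere
# in this module; 'pypy36-pp73' is only an assumed example value):
#
#   make_compiled_pathname('pkg/foo.py')
#       -> 'pkg/__pycache__/foo.pypy36-pp73.pyc'
#   make_source_pathname('pkg/__pycache__/foo.pypy36-pp73.pyc')
#       -> 'pkg/foo.py'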
def get_sourcefile(space, filename):
start = len(filename) - 4
stop = len(filename) - 1
if not 0 <= start <= stop or filename[start:stop].lower() != ".py":
return space.newfilename(filename)
py = make_source_pathname(filename)
if py is None:
py = filename[:-1]
try:
st = os.stat(py)
except OSError:
pass
else:
if stat.S_ISREG(st.st_mode):
return space.newfilename(py)
return space.newfilename(filename)
def update_code_filenames(space, code_w, pathname, oldname=None):
assert isinstance(code_w, PyCode)
if oldname is None:
oldname = code_w.co_filename
elif code_w.co_filename != oldname:
return
code_w.co_filename = pathname
code_w.w_filename = space.newfilename(pathname)
constants = code_w.co_consts_w
for const in constants:
if const is not None and isinstance(const, PyCode):
update_code_filenames(space, const, pathname, oldname)
def _get_long(s):
a = ord(s[0])
b = ord(s[1])
c = ord(s[2])
d = ord(s[3])
if d >= 0x80:
d -= 0x100
return a | (b<<8) | (c<<16) | (d<<24)
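# Worked example: _get_long('\x04\x03\x02\x01') == 0x01020304 (little-endian);
# a fourth byte of 0x80 or above makes the result negative, e.g.
# _get_long('\xff\xff\xff\xff') == -1.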
def read_compiled_module(space, cpathname, strbuf):
""" Read a code object from a file and check it for validity """
w_marshal = space.getbuiltinmodule('marshal')
w_code = space.call_method(w_marshal, 'loads', space.newbytes(strbuf))
if not isinstance(w_code, Code):
raise oefmt(space.w_ImportError, "Non-code object in %s", cpathname)
return w_code
@jit.dont_look_inside
def load_compiled_module(space, w_modulename, w_mod, cpathname, magic,
source, write_paths=True):
"""
Load a module from a compiled file, execute it, and return its
module object.
"""
if magic != get_pyc_magic(space):
raise oefmt(space.w_ImportError, "Bad magic number in %s", cpathname)
#print "loading pyc file:", cpathname
code_w = read_compiled_module(space, cpathname, source)
optimize = space.sys.get_optimize()
if optimize >= 2:
code_w.remove_docstrings(space)
exec_code_module(space, w_mod, code_w, cpathname, cpathname, write_paths)
return w_mod
class FastPathGiveUp(Exception):
pass
def _gcd_import(space, name):
# check sys.modules, if the module is already there and initialized, we can
# use it, otherwise fall back to importlib.__import__
# NB: we don't get the importing lock here, but CPython has the same fast
# path
w_modules = space.sys.get('modules')
w_module = space.finditem_str(w_modules, name)
if w_module is None:
raise FastPathGiveUp
# to check whether a module is initialized, we can ask for
# module.__spec__._initializing, which should be False
try:
w_spec = space.getattr(w_module, space.newtext("__spec__"))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
raise FastPathGiveUp
try:
w_initializing = space.getattr(w_spec, space.newtext("_initializing"))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
# we have no mod.__spec__._initializing, so it's probably a builtin
# module which we can assume is initialized
else:
if space.is_true(w_initializing):
raise FastPathGiveUp
return w_module
def import_name_fast_path(space, w_modulename, w_globals, w_locals, w_fromlist,
w_level):
level = space.int_w(w_level)
if level == 0:
# fast path only for absolute imports without a "from" list, for now
# fromlist can be supported if we are importing from a module, not a
# package. to check that, look for the existence of __path__ attribute
# in w_mod
try:
name = space.text_w(w_modulename)
w_mod = _gcd_import(space, name)
have_fromlist = space.is_true(w_fromlist)
if not have_fromlist:
dotindex = name.find(".")
if dotindex < 0:
return w_mod
return _gcd_import(space, name[:dotindex])
except FastPathGiveUp:
pass
else:
assert have_fromlist
w_path = space.findattr(w_mod, space.newtext("__path__"))
if w_path is not None:
# hard case, a package! Call back into importlib
w_importlib = space.getbuiltinmodule('_frozen_importlib')
return space.call_method(w_importlib, "_handle_fromlist",
w_mod, w_fromlist,
space.w_default_importlib_import)
else:
return w_mod
return space.call_function(space.w_default_importlib_import, w_modulename, w_globals,
w_locals, w_fromlist, w_level)
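# Behaviour sketch of the fast path above: an absolute import whose full dotted
# name is already initialized in sys.modules is answered without entering
# importlib -- `import a.b` returns sys.modules['a'], and `from a import b`
# returns sys.modules['a'] unless `a` is a package (has __path__), in which
# case _frozen_importlib._handle_fromlist finishes the job; relative imports
# and modules still being initialized fall back to the default importlib
# __import__.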
def get_spec(space, w_module):
try:
return space.getattr(w_module, space.newtext('__spec__'))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return space.w_None
def is_spec_initializing(space, w_spec):
if space.is_none(w_spec):
return False
try:
w_initializing = space.getattr(w_spec, space.newtext("_initializing"))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return False
else:
return space.is_true(w_initializing)
def get_path(space, w_module):
default = space.newtext("unknown location")
try:
w_ret = space.getattr(w_module, space.newtext('__file__'))
except OperationError as e:
if not e.match(space, space.w_AttributeError):
raise
return default
if w_ret is space.w_None:
return default
return w_ret
#print "loading pyc file:", cpathname # check sys.modules, if the module is already there and initialized, we can # use it, otherwise fall back to importlib.__import__ # NB: we don't get the importing lock here, but CPython has the same fast # path # to check whether a module is initialized, we can ask for # module.__spec__._initializing, which should be False # we have no mod.__spec__._initializing, so it's probably a builtin # module which we can assume is initialized # fast path only for absolute imports without a "from" list, for now # fromlist can be supported if we are importing from a module, not a # package. to check that, look for the existence of __path__ attribute # in w_mod # hard case, a package! Call back into importlib | 2.060578 | 2 |
data-hub-api/apps/migrator/tests/queries/test_all.py | uktrade/data-hub-api-old | 0 | 6633007 | import datetime
from django.utils import timezone
from reversion import revisions as reversion
from reversion.models import Revision, Version
from migrator.tests.models import SimpleObj
from migrator.tests.base import BaseMockedCDMSRestApiTestCase
from cdms_api.tests.rest.utils import mocked_cdms_list
class AllTestCase(BaseMockedCDMSRestApiTestCase):
def test_all_with_some_local_objs(self):
"""
Klass.objects.all() will:
- hit cdms to get the objs
- create or update local objs if necessary
- return local objs
In this case:
- cdms-pk1 does not exist in local =>
- local obj should get created
- revisions created
- cdms-pk2 is in sync with local obj =>
- local obj should not change
- no revisions should get created
- cdms-pk3 is more up-to-date than local =>
- local obj should get updated
- revisions created
"""
obj2 = SimpleObj.objects.skip_cdms().create(
cdms_pk='cdms-pk2', name='name2', int_field=10
)
obj3 = SimpleObj.objects.skip_cdms().create(
cdms_pk='cdms-pk3', name='name3', int_field=20
)
self.reset_revisions()
mocked_list = [
{
'SimpleId': 'cdms-pk1',
'Name': 'name1',
'CreatedOn': (timezone.now() - datetime.timedelta(days=2)).replace(microsecond=0),
'ModifiedOn': (timezone.now() - datetime.timedelta(days=1)).replace(microsecond=0),
'DateTimeField': None,
'IntField': None,
'FKField': None
},
{
'SimpleId': 'cdms-pk2',
'Name': 'name2',
'ModifiedOn': obj2.modified,
'DateTimeField': None,
'IntField': 20,
'FKField': None
},
{
'SimpleId': 'cdms-pk3',
'Name': 'name3',
'ModifiedOn': obj3.modified + datetime.timedelta(days=1),
'DateTimeField': None,
'IntField': 20,
'FKField': None
},
]
self.mocked_cdms_api.list.side_effect = mocked_cdms_list(
list_data=mocked_list
)
objs = SimpleObj.objects.all()
self.assertEqual(len(objs), 3)
objs_dict = {obj.cdms_pk: obj for obj in objs}
# cdms-pk1
obj1 = objs_dict['cdms-pk1']
self.assertEqual(obj1.modified, mocked_list[0]['ModifiedOn'])
self.assertEqual(obj1.created, mocked_list[0]['CreatedOn'])
obj1 = SimpleObj.objects.skip_cdms().get(pk=obj1.pk) # reload and double check
self.assertEqual(obj1.modified, mocked_list[0]['ModifiedOn'])
self.assertEqual(obj1.created, mocked_list[0]['CreatedOn'])
# cdms-pk2
obj2 = objs_dict['cdms-pk2']
self.assertEqual(obj2.modified, mocked_list[1]['ModifiedOn'])
self.assertEqual(obj2.int_field, 10) # not 20 as records in sync
obj2 = SimpleObj.objects.skip_cdms().get(pk=obj2.pk) # reload and double check
self.assertEqual(obj2.modified, mocked_list[1]['ModifiedOn'])
self.assertEqual(obj2.int_field, 10) # not 20 as records in sync
# cdms-pk3
obj3 = objs_dict['cdms-pk3']
self.assertEqual(obj3.modified, mocked_list[2]['ModifiedOn'])
self.assertEqual(obj3.int_field, 20) # not 10 as record updated from cdms
obj3 = SimpleObj.objects.skip_cdms().get(pk=obj3.pk) # reload and double check
self.assertEqual(obj3.modified, mocked_list[2]['ModifiedOn'])
self.assertEqual(obj3.int_field, 20) # not 10 as record updated from cdms
self.assertAPINotCalled(['get', 'create', 'delete', 'update'])
# check versions
self.assertEqual(Version.objects.count(), 2)
self.assertEqual(Revision.objects.count(), 2)
# obj1
version_list_obj1 = reversion.get_for_object(obj1)
self.assertEqual(len(version_list_obj1), 1)
version = version_list_obj1[0]
version_data = version.field_dict
self.assertIsCDMSRefreshRevision(version.revision)
self.assertEqual(version_data['cdms_pk'], obj1.cdms_pk)
self.assertEqual(version_data['modified'], obj1.modified)
self.assertEqual(version_data['created'], obj1.created)
# obj3
version_list_obj3 = reversion.get_for_object(obj3)
self.assertEqual(len(version_list_obj3), 1)
version = version_list_obj3[0]
version_data = version.field_dict
self.assertIsCDMSRefreshRevision(version.revision)
self.assertEqual(version_data['cdms_pk'], obj3.cdms_pk)
self.assertEqual(version_data['modified'], obj3.modified)
self.assertEqual(version_data['created'], obj3.created)
def test_filter_all(self):
"""
Klass.objects.filter() should work as Klass.objects.all().
"""
self.mocked_cdms_api.list.return_value = []
results = list(SimpleObj.objects.filter())
self.assertEqual(results, [])
def test_exception(self):
"""
In case of exceptions during cdms calls, the exception gets propagated.
No changes or revisions happen.
"""
self.mocked_cdms_api.list.side_effect = Exception
self.assertRaises(
Exception,
list, SimpleObj.objects.all()
)
self.assertAPINotCalled(['get', 'create', 'delete', 'update'])
self.assertNoRevisions()
def test_all_skip_cdms(self):
"""
Klass.objects.skip_cdms().all() should not hit cdms and should not create any extra revisions.
"""
list(SimpleObj.objects.skip_cdms().all())
self.assertNoAPICalled()
self.assertNoRevisions()
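# Sketch of a typical invocation (assumed command; adjust to the project's
# tooling). The CDMS REST API is mocked by BaseMockedCDMSRestApiTestCase, so
# no network access is needed:
#
#   ./manage.py test migrator.tests.queries.test_all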
main.py | FajarTheGGman/RoseKiller | 1 | 6633008 | import os
from bs4 import BeautifulSoup as bs
import urllib3 as url
from content.xss import *
from content.dork import *
from content.sc import *
class Main:
def banner():
print(" ';.")
print(" .---., \ [!] Report Error To My Social Media :)")
print(" []-.__,>=======;==================")
print(" `----' ,/ [Rose Killer]")
print(" .;' [By]")
print(" [<NAME>]")
def Run():
def help():
print("[Help Commands]")
print("- help (See all commands)")
print("- xss (exploit websites using xss method)")
print("- dork (dork website)")
print("- script (take script deface in website)")
user = str(input("\n\n[RoseKiller] >_ "))
if user == "xss":
xss = Xss.main()
elif user == "dork":
dork = Dork.main()
elif user == "script":
sc = Sc.main()
elif user == "help":
help()
else:
print("[!] Wrong Commands")
help()
banner = Main.banner()
r = Main.Run()
Converter/OpenVINO/Tests/ModulesTest.py | EmilPi/PuzzleLib | 52 | 6633009 | import numpy as np
from PuzzleLib import Config
Config.backend = Config.Backend.intel
Config.globalEvalMode = True
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Containers.Sequential import Sequential
from PuzzleLib.Containers.Parallel import Parallel
from PuzzleLib.Modules.BatchNorm import BatchNorm
from PuzzleLib.Modules.Concat import Concat
from PuzzleLib.Modules.MulAddConst import MulAddConst
from PuzzleLib.Modules.Split import Split
from PuzzleLib.Modules.SoftMax import SoftMax
from PuzzleLib.Modules.Upsample2D import Upsample2D
from PuzzleLib.Converter.OpenVINO.BuildVINOEngine import buildVINOEngine
def batchNormTest():
batchsize, size = 16, 10
mod = BatchNorm(size, name="bn")
mod.evalMode()
data = gpuarray.to_gpu(np.random.randn(batchsize, size).astype(np.float32))
engine = buildVINOEngine(mod, data.shape, savepath="../TestData")
outdata = mod(data)
enginedata = engine(data)
assert np.allclose(outdata.get(), enginedata.get())
def concatTest():
batchsize, height, width = 4, 5, 8
maps1, maps2 = 3, 2
mod = Concat(axis=1, name="concat")
data = [
gpuarray.to_gpu(np.random.randn(batchsize, maps, height, width).astype(np.float32)) for maps in [maps1, maps2]
]
engine = buildVINOEngine(mod, [subdata.shape for subdata in data], savepath="../TestData")
outdata = mod(data)
enginedata = engine(data)
assert np.allclose(outdata.get(), enginedata.get())
def mulAddConstTest():
batchsize, maps, height, width = 4, 3, 5, 8
mod = MulAddConst(a=1.5, b=-2.0, name="muladd")
data = gpuarray.to_gpu(np.random.randn(batchsize, maps, height, width).astype(np.float32))
engine = buildVINOEngine(mod, data.shape, savepath="../TestData")
outdata = mod(data)
enginedata = engine(data)
assert np.allclose(outdata.get(), enginedata.get())
def splitTest():
batchsize, maps, height, width = 2, 6, 4, 5
net = Sequential(name="split")
net.append(Split(axis=1, sections=(2, 4)))
net.append(Parallel().append(SoftMax()).append(SoftMax()))
data = gpuarray.to_gpu(np.random.randn(batchsize, maps, height, width).astype(np.float32))
engine = buildVINOEngine(net, data.shape, savepath="../TestData")
outdata = net(data)
enginedata = engine(data)
assert all(np.allclose(outdat.get(), enginedat.get()) for outdat, enginedat in zip(outdata, enginedata))
def upsample2dTest():
batchsize, maps, height, width = 4, 3, 5, 8
mod = Upsample2D(scale=2, name="upsample")
data = gpuarray.to_gpu(np.random.randn(batchsize, maps, height, width).astype(np.float32))
engine = buildVINOEngine(mod, data.shape, savepath="../TestData")
outdata = mod(data)
enginedata = engine(data)
assert np.allclose(outdata.get(), enginedata.get())
def main():
batchNormTest()
concatTest()
mulAddConstTest()
splitTest()
upsample2dTest()
if __name__ == "__main__":
main()
build/update-olm.py | davidvossel/node-maintenance-operator | 0 | 6633010 | #!/usr/bin/env python3
import logging
import sys
import yaml
_ANNOTATIONS = {
'categories': 'OpenShift Optional',
'containerImage': 'quay.io/kubevirt/node-maintenance-operator',
'repository': 'https://github.com/kubevirt/node-maintenance-operator',
'description': \
'Node-maintenance-operator maintains nodes in cluster',
}
_DESCRIPTION = "Node maintenance operator"
_NAMESPACE = 'node-maintenance-operator'
_SPEC = {
'description': _DESCRIPTION,
'provider': {
'name': 'KubeVirt project'
},
'maintainers': [{
'name': 'KubeVirt project',
'email': '<EMAIL>',
}],
'keywords': [
'KubeVirt', 'Virtualization', 'Node-maintenance'
],
'links': [{
'name': 'KubeVirt',
'url': 'https://kubevirt.io',
}, {
'name': 'Source Code',
'url': 'https://github.com/kubevirt/node-maintenance-operator'
}],
'labels': {
'alm-owner-kubevirt': 'nodemaintenanceoperator',
'operated-by': 'nodemaintenanceoperator',
},
'selector': {
'matchLabels': {
'alm-owner-kubevirt': 'nodemaintenanceoperator',
'operated-by': 'nodemaintenanceoperator',
},
},
}
_CRD_INFOS = {
'nodemaintenances.kubevirt.io': {
'displayName': 'KubeVirt node maintenance',
'description': \
'Represents a deployment of node maintenance crd',
'specDescriptors': [{
'description': \
'The version of the node maintenance to deploy',
'displayName': 'Version',
'path': 'version',
'x-descriptors': [
'urn:alm:descriptor:io.kubernetes.node-maintenance:version',
],
}],
}
}
def process(path):
with open(path, 'rt') as fh:
manifest = yaml.safe_load(fh)
manifest['metadata']['namespace'] = _NAMESPACE
manifest['metadata']['annotations'].update(_ANNOTATIONS)
manifest['spec'].update(_SPEC)
for crd in manifest['spec']['customresourcedefinitions']['owned']:
crd.update(_CRD_INFOS.get(crd['name'], {}))
yaml.safe_dump(manifest, sys.stdout)
if __name__ == '__main__':
for arg in sys.argv[1:]:
try:
process(arg)
except Exception as ex:
logging.error('error processing %r: %s', arg, ex)
# keep going!
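# Example invocation (hypothetical manifest path; the real CSV manifest lives
# in the operator's olm-catalog directory):
#
#   python build/update-olm.py deploy/olm-catalog/node-maintenance-operator.clusterserviceversion.yaml \
#       > node-maintenance-operator.csv.yaml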
test_proj/media_library/tests/test_managers.py | Querschlag/django-video-encoding | 116 | 6633011 |
import pytest
from django.contrib.contenttypes.models import ContentType
from ..models import Format
@pytest.fixture
def video_format(local_video):
return Format.objects.create(
object_id=local_video.pk,
content_type=ContentType.objects.get_for_model(local_video),
field_name='file',
format='mp4_hd',
progress=100,
)
@pytest.mark.django_db
def test_related_manager(local_video):
assert hasattr(local_video.format_set, 'complete')
assert hasattr(local_video.format_set, 'in_progress')
@pytest.mark.django_db
def test_in_progress(video_format):
video_format.progress = 30
video_format.save()
assert Format.objects.complete().count() == 0
assert Format.objects.in_progress().count() == 1
assert Format.objects.in_progress()[0].progress < 100
@pytest.mark.django_db
def test_complete(video_format):
assert Format.objects.in_progress().count() == 0
assert Format.objects.complete().count() == 1
assert Format.objects.complete()[0].progress == 100
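# Note: the `local_video` fixture used above is not defined in this module; it
# is assumed to come from the project's conftest.py and to provide a saved
# model instance with a `file` field (the field the Format rows reference via
# field_name='file').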
dashlib/mnb_maketx.py | chaeplin/dash-mnb | 18 | 6633012 |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
from decimal import Decimal
from config import *
from mnb_misc import *
from mnb_rpc import *
from mnb_mnconf import *
from mnb_hwwallet import *
def print_balance(mn_config, have_unconfirmed_tx):
need_wallet_rescan = False
print('\n[masternodes balance]')
print('alias\tcnt\tspn\tbalance\t\taddress to send MN earnings')
total_balance = 0
for m in mn_config:
alias = m.get('alias')
unspent = m.get('collateral_dashd_balance')
sumofunspent = sum(unspent)
cnt = len(unspent)
total_balance = total_balance + sumofunspent
spn = 0
txs_spn = m.get('txs')
for sp in txs_spn:
spn = spn + len(sp)
if cnt == 0:
need_wallet_rescan = True
if 'rpcusessl' in globals() and rpcusessl and rpcbindip == "test.stats.dash.org":
need_wallet_rescan = False
if MOVE_1K_COLLATERAL:
need_wallet_rescan = False
print(
alias +
'\t' +
'{:2d}\t{:2d}\t{:13.8f}'.format(
cnt,
spn,
sumofunspent) +
'\t' +
str(m.get('receiving_address', '----')))
print('\n\t\t Total : ', total_balance)
print('\n* cnt - count : number of payouts(un + mature) + 1(collateral)')
print('* spn - spendable : number of spendable payouts(mature, over 100 confirmation)')
if have_unconfirmed_tx:
print('* can be inaccurate after a transaction(transfer/xfer), need 1 confirmation')
return need_wallet_rescan
def check_mtime_of_tx(unspent_cache_abs_path):
if os.path.exists(unspent_cache_abs_path):
mtime_of_unspent_cache = int(os.path.getmtime(unspent_cache_abs_path))
cache_unspent_statinfo = os.stat(unspent_cache_abs_path)
else:
return True
if cache_unspent_statinfo.st_size == 0:
return True
if time.time() > (mtime_of_unspent_cache + (txs_cache_refresh_interval_hour * 60 * 60)):
return True
return False
def get_unspent_txs(mnconfig, blockcount, access, SEND_TO_BIP32, bip32_unused):
collateral_address = mnconfig.get('collateral_address')
collateral_txidtxidn = mnconfig.get('collateral_txidtxidn')
receiving_address = mnconfig.get('receiving_address')
unspent_cache_abs_path = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'../cache/' +
(
'MAINNET' if MAINNET else 'TESTNET') +
'-' +
collateral_txidtxidn +
'-unspent.dat')
bgetListUnspentAgain = check_mtime_of_tx(unspent_cache_abs_path)
if bgetListUnspentAgain:
#listunspent = get_listunspent(6, 999999999, collateral_address, access)
listunspent = getaddressutxos(collateral_address, access)
with open(unspent_cache_abs_path, 'w') as outfile:
json.dump(listunspent, outfile)
else:
with open(unspent_cache_abs_path) as data_file:
listunspent = json.load(data_file, parse_float=Decimal)
unspent_mine = []
balance_mine = []
for m in listunspent:
unspent_txidtxidn = get_txidtxidn(m['txid'], m['outputIndex'])
#unspent_amount = m['amount']
unspent_amount = round(Decimal(float(m['satoshis'] / 1e8)), 8)
balance_mine.append(unspent_amount)
if MOVE_1K_COLLATERAL:
unspent_mine.append(m)
elif MOVE_1K_COLLATERAL == False:
if (unspent_txidtxidn != collateral_txidtxidn) and (
unspent_amount < max_amounts):
unspent_mine.append(m)
txs = []
bip32sendto_all = []
mature_confirmation = 101
# for testing
#mature_confirmation = 10
desc_displayed = False
for x in unspent_mine:
if (x.get('address') == collateral_address) and ((blockcount - mature_confirmation) > x.get('height')):
if SEND_TO_BIP32 and bip32_unused != None and receiving_address == 'BIP32_PATH':
if not desc_displayed:
print("\t---> getting unused addresses of bip32 path")
desc_displayed = True
bip32sendto_unused = bip32_unused.__next__()
tx = {
"amount": round(Decimal(float(x.get('satoshis') / 1e8)), 8),
"txid": x.get('txid'),
"vout": x.get('outputIndex'),
"bip32sendto": bip32sendto_unused
}
bip32sendto_all.append(bip32sendto_unused)
else:
tx = {
"amount": round(Decimal(float(x.get('satoshis') / 1e8)), 8),
"txid": x.get('txid'),
"vout": x.get('outputIndex')
}
txs.append(tx)
if SEND_TO_BIP32 and bip32_unused != None and receiving_address == 'BIP32_PATH':
sublist = [txs[i:i + 1] for i in range(0, len(txs), 1)]
else:
sublist = [txs[i:i + max_unspent] for i in range(0, len(txs), max_unspent)]
return sublist, balance_mine, bip32sendto_all
def make_inputs_for_hw_wallet(
tx,
receiving_address,
collateral_spath,
client,
mpath,
SEND_TO_BIP32):
# trezor and keepkey
import binascii
from decimal import Decimal
if TYPE_HW_WALLET.lower().startswith("keepkey"):
import keepkeylib.messages_pb2 as proto
import keepkeylib.types_pb2 as proto_types
from keepkeylib import tx_api
from keepkeylib.tx_api import TXAPIDashrpc
elif TYPE_HW_WALLET.lower().startswith("trezor"):
import trezorlib.messages_pb2 as proto
import trezorlib.types_pb2 as proto_types
from trezorlib import tx_api
from trezorlib.tx_api import TXAPIDashrpc
tx_api.rpcuser = rpcuser
tx_api.rpcpassword = <PASSWORD>
tx_api.rpcbindip = rpcbindip
tx_api.rpcport = (rpcport if USE_SSH_TUNNEL is False else SSH_LOCAL_PORT)
if 'rpcusessl' in globals() and rpcusessl:
tx_api.rpcusessl = rpcusessl
client.set_tx_api(TXAPIDashrpc())
inputs = []
outputs = []
amount_total = 0
purpose, coin_type, account, change = chain_path(mpath)
if collateral_spath is None or receiving_address is None:
err_msg = 'make_inputs_for_hw_wallet receiving_address / collateral_spath : Should not None'
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
# make input
for x in tx:
amount = x.get('amount', None)
txid = x.get('txid', None)
vout = x.get('vout', None)
if amount is None or txid is None or vout is None:
err_msg = 'make_inputs_for_hw_wallet amount / txid / vout : Should not None'
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
amount_total += amount
inputs.append(
proto_types.TxInputType(
address_n=[
purpose | 0x80000000,
coin_type | 0x80000000,
account | 0x80000000,
change,
int(collateral_spath)],
prev_hash=binascii.unhexlify(txid),
prev_index=vout))
# after dip001
# todo : use estimatesmartfee
txsizefee = 50 + (150 * len(inputs))
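    # e.g. (illustrative) 4 inputs -> 50 + 4 * 150 = 650 duffs, i.e. 0.0000065 DASH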
# old
#txsizefee = round((len(inputs) * 148 + 33 - 10) / 1000) * min_fee
# minimal fee if input length is < 4, or fee == 0
# if len(inputs) < 4:
#if txsizefee == 0:
# txsizefee = min_fee
# bip32 1 input tx
if SEND_TO_BIP32 and receiving_address == 'BIP32_PATH':
txsizefee = 250
# txsizefee = 2500
# make output based on inputs
if SEND_TO_BIP32 and receiving_address == 'BIP32_PATH':
if len(tx) == 1:
bip32sendto = tx[0].get('bip32sendto', None)
if bip32sendto != None and receiving_address == 'BIP32_PATH':
outputs.append(
proto_types.TxOutputType(
address=bip32sendto,
amount=int(
amount_total *
100000000) -
txsizefee,
script_type=proto_types.PAYTOADDRESS,
))
else:
err_msg = 'bip32_send_to_address is None'
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
else:
err_msg = 'more than 1 tx when making input for bip32_path'
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
else:
outputs.append(
proto_types.TxOutputType(
address=receiving_address,
amount=int(
amount_total *
100000000) -
txsizefee,
script_type=proto_types.PAYTOADDRESS,
))
feetohuman = round(Decimal(txsizefee / 1e8), 6)
if SEND_TO_BIP32 and receiving_address == 'BIP32_PATH':
print('\n\tsend %s\n\t%s txs to %s\n\twith fee of %s\n\ttotal amount : %s\n' % (
amount_total - feetohuman, len(tx), bip32sendto, feetohuman, amount_total))
else:
print('\n\tsend %s\n\t%s txs to %s\n\twith fee of %s\n\ttotal amount : %s\n' % (
amount_total - feetohuman, len(tx), receiving_address, feetohuman, amount_total))
print_hw_wallet_check()
try:
(signatures, serialized_tx) = client.sign_tx(coin_name, inputs, outputs)
# check tx size
        if len(serialized_tx.hex()) > 90000:
            err_msg = 'serialized transaction is too large'
            print_err_exit(
                get_caller_name(),
                get_function_name(),
                err_msg)
return serialized_tx.hex()
except Exception as e:
err_msg = str(e.args)
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
except KeyboardInterrupt:
print_err_exit(
get_caller_name(),
get_function_name(),
'KeyboardInterrupt')
def make_txs_for_hwwallet(mnconfig, client, mpath, SEND_TO_BIP32):
txs = mnconfig.get('txs', None)
collateral_spath = mnconfig.get('collateral_spath', None)
receiving_address = mnconfig.get('receiving_address', None)
if collateral_spath is None or receiving_address is None:
err_msg = 'make_inputs_for_hw_wallet receiving_address / collateral_spath : Should not be None'
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
serialized_txs = []
if txs is not None:
for tx in txs:
if (len(tx)) >= min_unspent or MOVE_1K_COLLATERAL:
serialized_tx = make_inputs_for_hw_wallet(tx, receiving_address, collateral_spath, client, mpath, SEND_TO_BIP32)
serialized_txs.append(serialized_tx)
else:
print('---> count of txs less than min_unspent : %s' % min_unspent)
return None
else:
return None
return serialized_txs
# end
python/venv/lib/python2.7/site-packages/keystoneauth1/tests/unit/loading/test_conf.py | sjsucohort6/openstack | 0 | 6633013 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
from oslo_config import fixture as config
import stevedore
from keystoneauth1 import exceptions
from keystoneauth1 import loading
from keystoneauth1.loading._plugins.identity import v2
from keystoneauth1.loading._plugins.identity import v3
from keystoneauth1.tests.unit.loading import utils
def to_oslo_opts(opts):
return [o._to_oslo_opt() for o in opts]
class ConfTests(utils.TestCase):
def setUp(self):
super(ConfTests, self).setUp()
self.conf_fixture = self.useFixture(config.Config())
# NOTE(jamielennox): we register the basic config options first because
# we need them in place before we can stub them. We will need to run
# the register again after we stub the auth section and auth plugin so
# it can load the plugin specific options.
loading.register_auth_conf_options(self.conf_fixture.conf,
group=self.GROUP)
def test_loading_v2(self):
section = uuid.uuid4().hex
auth_url = uuid.uuid4().hex
username = uuid.uuid4().hex
password = uuid.uuid4().hex
trust_id = uuid.uuid4().hex
tenant_id = uuid.uuid4().hex
self.conf_fixture.config(auth_section=section, group=self.GROUP)
loading.register_auth_conf_options(self.conf_fixture.conf,
group=self.GROUP)
self.conf_fixture.register_opts(
to_oslo_opts(v2.Password().get_options()),
group=section)
self.conf_fixture.config(auth_type=self.V2PASS,
auth_url=auth_url,
username=username,
password=password,
trust_id=trust_id,
tenant_id=tenant_id,
group=section)
a = loading.load_auth_from_conf_options(self.conf_fixture.conf,
self.GROUP)
self.assertEqual(auth_url, a.auth_url)
self.assertEqual(username, a.username)
        self.assertEqual(password, a.password)
self.assertEqual(trust_id, a.trust_id)
self.assertEqual(tenant_id, a.tenant_id)
def test_loading_v3(self):
section = uuid.uuid4().hex
        auth_url = uuid.uuid4().hex
token = uuid.uuid4().hex
trust_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
project_domain_name = uuid.uuid4().hex
self.conf_fixture.config(auth_section=section, group=self.GROUP)
loading.register_auth_conf_options(self.conf_fixture.conf,
group=self.GROUP)
self.conf_fixture.register_opts(to_oslo_opts(v3.Token().get_options()),
group=section)
self.conf_fixture.config(auth_type=self.V3TOKEN,
auth_url=auth_url,
token=token,
trust_id=trust_id,
project_id=project_id,
project_domain_name=project_domain_name,
group=section)
a = loading.load_auth_from_conf_options(self.conf_fixture.conf,
self.GROUP)
self.assertEqual(token, a.auth_methods[0].token)
self.assertEqual(trust_id, a.trust_id)
self.assertEqual(project_id, a.project_id)
self.assertEqual(project_domain_name, a.project_domain_name)
def test_loading_invalid_plugin(self):
auth_type = uuid.uuid4().hex
self.conf_fixture.config(auth_type=auth_type,
group=self.GROUP)
e = self.assertRaises(exceptions.NoMatchingPlugin,
loading.load_auth_from_conf_options,
self.conf_fixture.conf,
self.GROUP)
self.assertEqual(auth_type, e.name)
def test_loading_with_no_data(self):
l = loading.load_auth_from_conf_options(self.conf_fixture.conf,
self.GROUP)
self.assertIsNone(l)
@mock.patch('stevedore.DriverManager')
def test_other_params(self, m):
m.return_value = utils.MockManager(utils.MockLoader())
driver_name = uuid.uuid4().hex
self.conf_fixture.register_opts(
to_oslo_opts(utils.MockLoader().get_options()),
group=self.GROUP)
self.conf_fixture.config(auth_type=driver_name,
group=self.GROUP,
**self.TEST_VALS)
a = loading.load_auth_from_conf_options(self.conf_fixture.conf,
self.GROUP)
self.assertTestVals(a)
m.assert_called_once_with(namespace=loading.PLUGIN_NAMESPACE,
name=driver_name,
invoke_on_load=True)
@utils.mock_plugin()
def test_same_section(self, m):
self.conf_fixture.register_opts(
to_oslo_opts(utils.MockLoader().get_options()),
group=self.GROUP)
loading.register_auth_conf_options(self.conf_fixture.conf,
group=self.GROUP)
self.conf_fixture.config(auth_type=uuid.uuid4().hex,
group=self.GROUP,
**self.TEST_VALS)
a = loading.load_auth_from_conf_options(self.conf_fixture.conf,
self.GROUP)
self.assertTestVals(a)
@utils.mock_plugin()
def test_diff_section(self, m):
section = uuid.uuid4().hex
self.conf_fixture.config(auth_section=section, group=self.GROUP)
loading.register_auth_conf_options(self.conf_fixture.conf,
group=self.GROUP)
self.conf_fixture.register_opts(to_oslo_opts(
utils.MockLoader().get_options()),
group=section)
self.conf_fixture.config(group=section,
auth_type=uuid.uuid4().hex,
**self.TEST_VALS)
a = loading.load_auth_from_conf_options(self.conf_fixture.conf,
self.GROUP)
self.assertTestVals(a)
def test_plugins_are_all_opts(self):
manager = stevedore.ExtensionManager(loading.PLUGIN_NAMESPACE,
propagate_map_exceptions=True)
def inner(driver):
for p in driver.plugin().get_options():
self.assertIsInstance(p, loading.Opt)
manager.map(inner)
def test_get_common(self):
opts = loading.get_auth_common_conf_options()
for opt in opts:
self.assertIsInstance(opt, cfg.Opt)
self.assertEqual(2, len(opts))
def test_get_named(self):
loaded_opts = loading.get_plugin_options('v2password')
plugin_opts = v2.Password().get_options()
loaded_names = set([o.name for o in loaded_opts])
plugin_names = set([o.name for o in plugin_opts])
        self.assertEqual(plugin_names, loaded_names)
iblvideo/tests/test_choiceworld.py | int-brain-lab/iblvideo | 4 | 6633014 | import shutil
import numpy as np
import pandas as pd
from one.api import ONE
from iblvideo.choiceworld import dlc
from iblvideo.weights import download_weights
from iblvideo.tests import _download_dlc_test_data
from iblvideo import __version__
def test_dlc(version=__version__):
one = ONE()
test_data = _download_dlc_test_data(one=one)
path_dlc = download_weights(version=version, one=one)
for cam in ['body', 'left', 'right']:
file_mp4 = test_data.joinpath('input', f'_iblrig_{cam}Camera.raw.mp4')
tmp_dir = test_data.joinpath('input', f'dlc_tmp_iblrig_{cam}Camera.raw')
out_file, _ = dlc(file_mp4, path_dlc)
assert out_file
assert (tmp_dir.is_dir() is False)
out_pqt = pd.read_parquet(out_file)
ctrl_pqt = pd.read_parquet(
test_data.joinpath('output', f'_ibl_{cam}Camera.dlc.pqt'))
assert np.all(out_pqt.columns == ctrl_pqt.columns)
# only compare entries with likelihood over 0.9
targets = np.unique(['_'.join(col.split('_')[:-1]) for col in ctrl_pqt.columns])
for t in targets:
idx_ctrl = ctrl_pqt.loc[ctrl_pqt[f'{t}_likelihood'] < 0.9].index
idx_out = out_pqt.loc[out_pqt[f'{t}_likelihood'] < 0.9].index
for idx in [idx_ctrl, idx_out]:
ctrl_pqt.loc[idx, [f'{t}_x', f'{t}_y', f'{t}_likelihood']] = np.nan
out_pqt.loc[idx, [f'{t}_x', f'{t}_y', f'{t}_likelihood']] = np.nan
try:
assert np.allclose(np.array(out_pqt), np.array(ctrl_pqt), rtol=10e-2, equal_nan=True)
except AssertionError:
diff = np.abs(np.array(out_pqt) - np.array(ctrl_pqt))
out_pqt.to_parquet(test_data.joinpath(f'_ibl_{cam}Camera.dlc.failed.pqt'))
print(np.nanmax(diff, axis=0), np.nanmean(diff, axis=0))
assert np.allclose(np.array(out_pqt), np.array(ctrl_pqt), rtol=10e-2, equal_nan=True)
alf_path = test_data.joinpath('alf')
    shutil.rmtree(alf_path)
training/tokenisation.py | dice-group/NETL-Automatic-Topic-Labelling- | 176 | 6633015 |
"""
Author: <NAME>
Date: October 2016
File: tokenisation.py
It takes in the processed xml dump extracted by WikiExtractor and tokenises it using the Stanford parser for tokenization.
You can use the URL below to download it and unzip it if you want to run it on your own.
http://nlp.stanford.edu/software/stanford-parser-full-2014-08-27.zip
The arguments for this file are given in main_train.py.
"""
import os
import argparse
import sys
# Get the arguments passed in main_train.py
parser = argparse.ArgumentParser()
parser.add_argument("parser_loc") # location of stanford parser giben in main_train.py
parser.add_argument("input_dir") # Input diretory which is output of wiki-extractor processed xml dump
parser.add_argument("output_dir")# Output directory for tokenised file
args = parser.parse_args()
# Checks if the specified output directory already exists. If it does, removes it.
if os.path.isdir(args.output_dir):
del_query = "rm -r "+args.output_dir
os.system(del_query)
# Gets all the sub directories from the location
list_files = os.listdir(args.input_dir)
# Gets the classpath to run stanford tokenizer
classpath = args.parser_loc +"/stanford-parser.jar"
query1 = "mkdir "+args.output_dir
os.system(query1)
for item in list_files:
if os.path.isdir(args.input_dir+"/"+item):
inp_subdir =args.input_dir +"/"+ item # Getting the full path for subdirectories which needs to be tokenized.
subfiles = os.listdir(inp_subdir) # listing the files in subdirectory
out_subdir = args.output_dir +"/"+ item
query = "mkdir " +out_subdir # making new sub directories in output location, so that the directory structure of tokenised file is same as input directory
os.system(query)
for elem in subfiles:
input_file = inp_subdir + "/"+elem # Working on files in subdirectory. We need to tokenize them
output_file = out_subdir + "/"+elem
query2 = "java -cp "+ classpath +" edu.stanford.nlp.process.PTBTokenizer -preserveLines --lowerCase <"+input_file+"> "+output_file # Java commanf to stanford tokenizer
print "Executing query"
print query2
            os.system(query2)
Skill_Development_Center/Individual_Assignments/Prethvi_Raj/Task_3.py | Pirouz-Nourian/earthy_18 | 1 | 6633016 |
import Rhino.Geometry as rg
import math as math
import rhinoscriptsyntax as rs
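# Assumption: this is a GhPython component script, so S (the geometry to
# sample, e.g. a Brep), xS, yS, zS (voxel sizes along x, y, z) arrive as
# component inputs, and b (cell-centre points) and c (signed distances) are
# the outputs assigned at the bottom of the script.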
Bbox =S.GetBoundingBox(True)
W = Bbox.Diagonal.X
L = Bbox.Diagonal.Y
H = Bbox.Diagonal.Z
#print(W)
XC = int(math.ceil(W/xS))
YC = int(math.ceil(L/yS))
ZC = int(math.ceil(H/zS))
points = []
distList = []
print(Bbox)
bPoint = Bbox.Min
print(bPoint)
bXV = rg.Vector3d.XAxis
bYV = rg.Vector3d.YAxis
xShift = xS/2
yShift = yS/2
zShift = zS/2
bplane=rg.Plane(bPoint, bXV, bYV)
for i in range(0,XC):
for j in range(0,YC):
for k in range(0,ZC):
point = bplane.PointAt(i*xS+xShift,j*yS+yShift,k*zS+zShift)
points.append(point)
cPoint = S.ClosestPoint(point)
distance = point.DistanceTo(cPoint)
if(S.IsPointInside(point, 0.1, True)):
distance = -distance
else:
distance = distance
distList.append(distance)
#print(distList)
b = points
c = distList
tracker/app/models/ddr.py | skielred/FairyJokeAPI | 3 | 6633017 |
import enum
import sqlalchemy as sa
from sqlalchemy import orm
from app import db
from app.utils.badges import FCBadges
from app.utils.enumerable import Enumerable
class DDRLocalChart(db.IdMixin, db.Base):
title = sa.Column(sa.String)
artist = sa.Column(sa.String)
step_artist = sa.Column(sa.String)
difficulty = sa.Column(sa.String)
level = sa.Column(sa.Integer)
class DDRScore(db.ExScoreMixin, db.Base):
class Mods(Enumerable):
TURN = {'MIRROR', 'LEFT', 'RIGHT', 'SHUFFLE'}
STEP_ZONE = {'OFF'}
SPEED = set(map(
lambda x: x / 100,
[*range(25, 400, 25), *range(400, 800, 50)]
))
ARROW_MOVE = {'BOOST', 'BRAKE', 'WAVE'}
SCROLL = {'REVERSE'}
CUT = {
'ON1', # Only shows 1/4s
'ON2', # Only shows 1/8s
}
FREEZE_ARROW = {'OFF'}
JUMP = {'OFF'}
LIFE_GAUGE = {'LIFE4', 'RISKY'}
SCREEN_FILTER = {'DARK', 'DARKER', 'DARKEST'}
GUIDELINE = {'BORDER', 'CENTER'}
class Clears(enum.Enum):
fail = 'FAIL'
play = 'PLAYED'
clear = 'CLEARED'
fc = 'FULLCOMBO'
gfc = 'GREAT FULLCOMBO'
pfc = 'PERFECT FULLCOMBO'
mfc = 'MARVELOUS FULLCOMBO'
api_chart_id = sa.Column(sa.Integer)
local_chart_id = sa.Column(sa.ForeignKey('ddr_local_charts.id'))
clear_type = sa.Column(sa.Enum(Clears))
mods = orm.relationship('DDRScoreMod')
chart = orm.relationship('DDRLocalChart', backref='scores')
class Badges(FCBadges):
MFC = 'MARVELOUS FULLCOMBO'
PFC = 'PERFECT FULLCOMBO'
GFC = 'GREAT FULLCOMBO'
@classmethod
def from_score(cls, score):
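            # Scores with any goods or misses defer to the generic FC badges;
            # otherwise the cleanest full combo wins: no greats or perfects
            # gives MFC, no greats gives PFC, anything else gives GFC.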
judges = score.judges_obj
if not judges.good + judges.miss == 0:
return super().from_score(score)
if judges.great + judges.perfect == 0:
return [cls.MFC]
if judges.great == 0:
return [cls.PFC]
return [cls.GFC]
@property
def badges(self):
return self.Badges.from_score(self)
class DDRScoreMod(db.IdMixin, db.Base):
score_id = sa.Column(sa.ForeignKey('ddr_scores.id'), nullable=False)
name = sa.Column(sa.Enum(*DDRScore.Mods.keys()), nullable=False)
value = sa.Column(sa.String)
    score = orm.relationship('DDRScore', back_populates='mods')
src/setup.py | akiyoko/django-simple-serializer | 180 | 6633018 |
import codecs
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
NAME = "django-simple-serializer"
PACKAGES = ["dss", ]
DESCRIPTION = "Django Simple Serializer is a serializer to help user serialize django data or python list into json,xml,dict data in a simple way."
LONG_DESCRIPTION = read("README.rst")
KEYWORDS = "django serializer"
AUTHOR = "RaPoSpectre"
AUTHOR_EMAIL = "<EMAIL>"
URL = "https://github.com/bluedazzle/django-simple-serializer"
VERSION = "2.0.7"
LICENSE = "MIT"
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
],
install_requires=[
'future'
],
keywords=KEYWORDS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
packages=PACKAGES,
include_package_data=True,
zip_safe=True,
)
Scripts/simulation/story_progression/story_progression_action_career.py | velocist/TS4CheatsInfo | 0 | 6633019 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\story_progression\story_progression_action_career.py
# Compiled at: 2015-05-01 22:45:36
# Size of source mod 2**32: 5363 bytes
import random
from sims4.repr_utils import standard_repr
from sims4.tuning.tunable import AutoFactoryInit, HasTunableSingletonFactory, TunableVariant
from story_progression.story_progression_action_base import StoryProgressionAction
from story_progression.story_progression_agents import StoryProgressionAgentSimInfo
import services, sims4.resources
class _CareerSubaction:
def __init__(self, sim_info):
self._sim_info = sim_info
self._sim_info_agent = StoryProgressionAgentSimInfo(sim_info)
def __repr__(self):
return standard_repr(self)
def save(self, data):
raise NotImplementedError
def execute_subaction(self):
raise NotImplementedError
class _CareerSubactionFactory(HasTunableSingletonFactory, AutoFactoryInit):
def load(self, data):
raise NotImplementedError
def get_potenial_subactions_gen(self, sim_info):
raise NotImplementedError
class _CareerSubactionJoin(_CareerSubaction):
def __init__(self, *args, career, **kwargs):
(super().__init__)(*args, **kwargs)
self._career = career
def __repr__(self):
return standard_repr(self, career=(self._career.__name__))
def save(self, data):
data.custom_guid = self._career.guid64
def execute_subaction(self):
user_level = random.randint(1, self._career.get_max_user_level())
self._sim_info.career_tracker.add_career((self._career(self._sim_info)), user_level_override=user_level,
give_skipped_rewards=False)
def update_demographics(self, demographics):
sim_info_agent = self._sim_info_agent.get_agent_clone(career=(self._career))
for demographic in demographics:
demographic.remove_sim_info_agent(self._sim_info_agent)
demographic.add_sim_info_agent(sim_info_agent)
class _CareerSubactionFactoryJoin(_CareerSubactionFactory):
def load(self, sim_info, data):
career_manager = services.get_instance_manager(sims4.resources.Types.CAREER)
career = career_manager.get(data.custom_guid)
if career is None:
raise TypeError
return _CareerSubactionJoin(sim_info, career=career)
def get_potenial_subactions_gen(self, sim_info):
career_service = services.get_career_service()
for career in career_service.get_career_list():
if career.career_story_progression.joining is not None:
yield _CareerSubactionJoin(sim_info, career=career)
class _CareerSubactionFactoryQuit(_CareerSubactionFactory):
pass
class _CareerSubactionFactoryFired(_CareerSubactionFactory):
pass
class _CareerSubactionFactoryRetire(_CareerSubactionFactory):
pass
class _CareerSubactionFactoryPromoted(_CareerSubactionFactory):
pass
class _CareerSubactionFactoryDemoted(_CareerSubactionFactory):
pass
class StoryProgressionActionCareer(StoryProgressionAction):
INSTANCE_TUNABLES = {'career_subaction': TunableVariant(description='\n The career operation to apply for this action.\n ',
join=(_CareerSubactionFactoryJoin.TunableFactory()),
quit=(_CareerSubactionFactoryQuit.TunableFactory()),
fired=(_CareerSubactionFactoryFired.TunableFactory()),
retire=(_CareerSubactionFactoryRetire.TunableFactory()),
promoted=(_CareerSubactionFactoryPromoted.TunableFactory()),
demoted=(_CareerSubactionFactoryDemoted.TunableFactory()),
default='join')}
def __init__(self, *args, career_subaction=None, **kwargs):
(super().__init__)(*args, **kwargs)
self._career_subaction = career_subaction
def __repr__(self):
return standard_repr(self, subaction=(self._career_subaction))
def load(self, data):
super().load(data)
self._career_subaction = self.career_subaction.load(self._sim_info, data)
def save(self, data):
super().save(data)
self._career_subaction.save(data)
@classmethod
def get_potential_actions_gen(cls, sim_info):
for career_subaction in cls.career_subaction.get_potenial_subactions_gen(sim_info):
yield cls(sim_info, career_subaction=career_subaction)
def execute_action(self):
return self._career_subaction.execute_subaction()
def update_demographics(self, demographics):
        self._career_subaction.update_demographics(demographics)
get_top_hits.py | beepscore/websearcher | 0 | 6633020 |
#!/usr/bin/env python3
from websearcher import top_hits
"""
Search words in input file. Use command line arguments.
"""
top_hits = top_hits.TopHits("@./data/input/top_hit_args.txt")
# top_hits_from_file_to_file writes to file, doesn't return anything
top_hits.top_hits_from_file_to_file()
pydynamo_brain/pydynamo_brain/test/uiPunctaTest.py | ubcbraincircuits/pyDynamo | 4 | 6633021 | import os
import time
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, QPoint
from pydynamo_brain.ui import DynamoWindow
import pydynamo_brain.util as util
from pydynamo_brain.util.testableFilePicker import setNextTestPaths
from pydynamo_brain.files import fullStateToString, stringToFullState
def _near(a, b):
return abs(a - b) < 1e-6
def _checkPointXYZR(p, x, y, z, r):
assert _near(x, p.location[0]) \
and _near(y, p.location[1]) \
and _near(z, p.location[2]) \
and _near(r, p.radius)
def _init(qtbot):
dynamoWindow = DynamoWindow(None, [])
dynamoWindow.show()
qtbot.addWidget(dynamoWindow)
return dynamoWindow
def run(qtbot):
dW = _init(qtbot)
scan1Path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "files", "scan1.tif")
# Click to open new stack:
setNextTestPaths([scan1Path])
qtbot.mouseClick(dW.initialMenu.buttonN, Qt.LeftButton)
qtbot.waitUntil(lambda: len(dW.stackWindows) == 1)
sW = dW.stackWindows[0]
view = sW.dendrites.imgView.viewport()
# Wait til window is ready:
qtbot.mouseClick(view, Qt.LeftButton, pos=QPoint(0, 0))
sW.raise_()
sW.activateWindow()
sW.setFocus(True)
qtbot.waitUntil(lambda: sW.hasFocus(), timeout=10000)
# Empty tree, no puncta
assert len(dW.fullState.trees) == 1
assert len(dW.fullState.puncta) == 0
# Enter puncta mode
qtbot.keyClick(sW, 'p')
assert dW.fullState.inPunctaMode()
pDraw = QPoint(100, 100)
pMove = QPoint(100, 150)
pSize = QPoint(103, 154)
x1 = 55.2423679
y1 = 44.1938943
r1 = 3.0000000
y2 = 71.8150782
r2 = 2.7621184
# Draw the point
qtbot.mouseClick(view, Qt.LeftButton, pos=pDraw)
assert len(dW.fullState.puncta) == 1
assert len(dW.fullState.puncta[0]) == 1
point = dW.fullState.puncta[0][0]
_checkPointXYZR(point, x1, y1, 0, r1)
# Move the point
qtbot.mouseClick(view, Qt.LeftButton, pos=pMove, modifier=Qt.ShiftModifier)
assert len(dW.fullState.puncta) == 1
assert len(dW.fullState.puncta[0]) == 1
point = dW.fullState.puncta[0][0]
_checkPointXYZR(point, x1, y2, 0, r1)
# Resize the point
qtbot.mouseClick(view, Qt.RightButton, pos=pSize)
assert len(dW.fullState.puncta) == 1
assert len(dW.fullState.puncta[0]) == 1
point = dW.fullState.puncta[0][0]
_checkPointXYZR(point, x1, y2, 0, r2)
# Draw a second point:
qtbot.mouseClick(view, Qt.LeftButton, pos=pDraw)
assert len(dW.fullState.puncta) == 1
assert len(dW.fullState.puncta[0]) == 2
point = dW.fullState.puncta[0][1]
_checkPointXYZR(point, x1, y1, 0, r1)
# Delete the first point by clicking on its boundary:
qtbot.mouseClick(view, Qt.LeftButton, pos=pSize, modifier=Qt.ControlModifier)
assert len(dW.fullState.puncta) == 1
assert len(dW.fullState.puncta[0]) == 1
# ... only the second point left
point = dW.fullState.puncta[0][0]
_checkPointXYZR(point, x1, y1, 0, r1)
# Save and load to verify
toString = fullStateToString(dW.fullState)
toFullState = stringToFullState(toString, "")
assert len(toFullState.puncta) == 1
assert len(toFullState.puncta[0]) == 1
point = toFullState.puncta[0][0]
_checkPointXYZR(point, x1, y1, 0, r1)
    return True
go/bootstrap.py | allaparthi/monorail | 0 | 6633022 | #!/usr/bin/env vpython
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepares a local hermetic Go installation.
- Downloads and unpacks the Go toolset in ../../golang.
- Downloads and installs Glide (used by deps.py).
- Fetches code dependencies via deps.py.
"""
import argparse
import collections
import contextlib
import json
import logging
import os
import shutil
import stat
import subprocess
import sys
import tempfile
LOGGER = logging.getLogger(__name__)
# /path/to/infra
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Directory with .gclient file.
GCLIENT_ROOT = os.path.dirname(ROOT)
# The current overarching Infra version. If this changes, everything will be
# updated regardless of its version.
INFRA_VERSION = 1
# Where to install Go toolset to. GOROOT would be <TOOLSET_ROOT>/go.
TOOLSET_ROOT = os.path.join(os.path.dirname(ROOT), 'golang')
# Default workspace with infra go code.
WORKSPACE = os.path.join(ROOT, 'go')
# Platform depended suffix for executable files.
EXE_SFX = '.exe' if sys.platform == 'win32' else ''
# On Windows we use git from depot_tools.
GIT_EXE = 'git.bat' if sys.platform == 'win32' else 'git'
# Version of Go CIPD package (infra/3pp/tools/go/${platform}) to install.
TOOLSET_VERSION = '1.14.2'
# Describes how to fetch 'glide'.
GLIDE_SOURCE = {
'src/github.com/Masterminds/glide': {
'url': (
'https://chromium.googlesource.com/external/github.com/'
'Masterminds/glide.git'),
'rev': 'refs/tags/v0.13.3',
'patches': [
'0001-Fix-edge-case-related-to-git-submodules-on-Windows.patch',
],
},
}
# Layout is the layout of the bootstrap installation.
_Layout = collections.namedtuple('Layout', (
# The path where the Go toolset is checked out at.
'toolset_root',
# The workspace path.
'workspace',
# The list of vendor directories. Each will have a Glide "deps.yaml" in it.
'vendor_paths',
# List of paths to append to GOPATH (in additional to `workspace`).
'go_paths',
# The list of DEPS'd in paths that contain Go sources. This is used to
# determine when our vendored tools need to be re-installed.
'go_deps_paths',
# Go package paths of tools to install into the bootstrap environment.
'go_install_tools',
))
class Layout(_Layout):
@property
def go_repo_versions_path(self):
"""The path where the latest installed Go repository versions are recorded.
"""
return os.path.join(self.workspace, '.deps_repo_versions.json')
# A base empty Layout.
_EMPTY_LAYOUT = Layout(
toolset_root=None,
workspace=None,
vendor_paths=None,
go_paths=None,
go_deps_paths=None,
go_install_tools=None)
# Infra standard layout.
LAYOUT = Layout(
toolset_root=TOOLSET_ROOT,
workspace=WORKSPACE,
vendor_paths=[WORKSPACE],
go_paths=[],
go_deps_paths=[os.path.join(WORKSPACE, _p) for _p in (
'src/go.chromium.org/gae',
'src/go.chromium.org/luci',
)],
go_install_tools=[
# Note: please add only tools that really should be in PATH in default
# dev environment.
'github.com/golang/mock/mockgen',
'go.chromium.org/gae/tools/proto-gae',
'go.chromium.org/luci/grpc/cmd/...',
'go.chromium.org/luci/luci_notify/cmd/...',
'go.chromium.org/luci/tools/cmd/...',
'infra/cmd/bqexport',
'infra/cmd/cloudsqlhelper',
],
)
# Describes a modification of os.environ, see get_go_environ_diff(...).
EnvironDiff = collections.namedtuple('EnvironDiff', [
'env', # {k:v} with vars to set or delete (if v == None)
'env_prefixes', # {k: [path]} with entries to prepend
])
class Failure(Exception):
"""Bootstrap failed."""
def read_file(path):
"""Returns contents of a given file or None if not readable."""
assert isinstance(path, (list, tuple))
try:
with open(os.path.join(*path), 'r') as f:
return f.read()
except IOError:
return None
def write_file(path, data):
"""Writes |data| to a file."""
assert isinstance(path, (list, tuple))
with open(os.path.join(*path), 'w') as f:
f.write(data)
def remove_directory(path):
"""Recursively removes a directory."""
assert isinstance(path, (list, tuple))
p = os.path.join(*path)
if not os.path.exists(p):
return
# Crutch to remove read-only file (.git/* in particular) on Windows.
def onerror(func, path, _exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
shutil.rmtree(p, onerror=onerror if sys.platform == 'win32' else None)
def install_toolset(toolset_root, version):
"""Downloads and installs Go toolset from CIPD.
GOROOT would be <toolset_root>/go/.
"""
cmd = subprocess.Popen(
[
'cipd.bat' if sys.platform == 'win32' else 'cipd',
'ensure', '-ensure-file', '-', '-root', toolset_root,
],
stdin=subprocess.PIPE)
cmd.communicate(
'@Subdir go\n'
'infra/3pp/tools/go/${platform} version:%s\n' % version
)
if cmd.returncode:
raise Failure('CIPD call failed, exit code %d' % cmd.returncode)
LOGGER.info('Validating...')
check_hello_world(toolset_root)
@contextlib.contextmanager
def temp_dir(path):
"""Creates a temporary directory, then deletes it."""
tmp = tempfile.mkdtemp(dir=path)
try:
yield tmp
finally:
remove_directory([tmp])
def check_hello_world(toolset_root):
"""Compiles and runs 'hello world' program to verify that toolset works."""
with temp_dir(toolset_root) as tmp:
path = os.path.join(tmp, 'hello.go')
write_file([path], r"""
package main
import "fmt"
func main() { fmt.Println("hello, world") }
""")
out = call_bare_go(toolset_root, tmp, ['run', path])
if out != 'hello, world':
raise Failure('Unexpected output from the sample program:\n%s' % out)
def call_bare_go(toolset_root, workspace, args):
"""Calls 'go <args>' in the given workspace scrubbing all other Go env vars.
Args:
toolset_root: where Go is installed at.
workspace: value for GOPATH, all other Go-specific env vars are scrubbed.
args: command line arguments for 'go' tool.
Returns:
Captured stripped stdout+stderr.
Raises:
Failure if the call failed. All details are logged in this case.
"""
cmd = [get_go_exe(toolset_root)] + args
env = get_go_environ(_EMPTY_LAYOUT._replace(
toolset_root=toolset_root,
workspace=workspace))
proc = subprocess.Popen(
cmd,
env=env,
cwd=workspace,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = proc.communicate()
if proc.returncode:
LOGGER.error('Failed to run %s: exit code %d', cmd, proc.returncode)
LOGGER.error('Environment:')
for k, v in sorted(env.items()):
LOGGER.error(' %s = %s', k, v)
LOGGER.error('Output:\n\n%s', out)
raise Failure('Go invocation failed, see the log')
return out.strip()
def infra_version_outdated(root):
infra = read_file([root, 'INFRA_VERSION'])
if not infra:
return True
return int(infra.strip()) < INFRA_VERSION
def write_infra_version(root):
write_file([root, 'INFRA_VERSION'], str(INFRA_VERSION))
def ensure_toolset_installed(toolset_root, version):
"""Installs or updates Go toolset if necessary.
Returns True if new toolset was installed.
"""
installed = read_file([toolset_root, 'INSTALLED_TOOLSET'])
if infra_version_outdated(toolset_root):
LOGGER.info('Infra version is out of date.')
elif installed == version:
LOGGER.debug('Go toolset is up-to-date: %s', installed)
return False
LOGGER.info('Installing Go toolset.')
LOGGER.info(' Old toolset is %s', installed)
LOGGER.info(' New toolset is %s', version)
remove_directory([toolset_root])
install_toolset(toolset_root, version)
LOGGER.info('Go toolset installed: %s', version)
write_file([toolset_root, 'INSTALLED_TOOLSET'], version)
write_infra_version(toolset_root)
return True
def ensure_glide_installed(toolset_root):
"""Installs or updates 'glide' tool."""
installed_tools = read_file([toolset_root, 'INSTALLED_TOOLS'])
available_tools = json.dumps(GLIDE_SOURCE, sort_keys=True)
if installed_tools == available_tools:
LOGGER.debug('Glide is up-to-date')
return
def install(workspace, pkg):
call_bare_go(toolset_root, workspace, ['install', pkg])
# Windows os.rename doesn't support overwrites.
name = pkg[pkg.rfind('/')+1:]
dest = os.path.join(toolset_root, 'go', 'bin', name + EXE_SFX)
if os.path.exists(dest):
os.remove(dest)
os.rename(os.path.join(workspace, 'bin', name + EXE_SFX), dest)
LOGGER.info('Installed %s', dest)
LOGGER.info('Installing Glide...')
with temp_dir(toolset_root) as tmp:
fetch_glide_code(tmp, GLIDE_SOURCE)
install(tmp, 'github.com/Masterminds/glide')
LOGGER.info('Glide is installed')
write_file([toolset_root, 'INSTALLED_TOOLS'], available_tools)
def fetch_glide_code(workspace, spec):
"""Fetches glide source code."""
def git(cmd, cwd):
subprocess.check_call([GIT_EXE] + cmd, cwd=cwd, stdout=sys.stderr)
for path, repo in sorted(spec.iteritems()):
path = os.path.join(workspace, path.replace('/', os.sep))
os.makedirs(path)
git(['clone', repo['url'], '.'], cwd=path)
git(['checkout', repo['rev']], cwd=path)
for patch in repo.get('patches', []):
LOGGER.info('Applying %s', patch)
git(['apply', os.path.join(WORKSPACE, 'patches', patch)], cwd=path)
def get_git_repository_head(path):
head = subprocess.check_output([GIT_EXE, '-C', path, 'rev-parse', 'HEAD'])
return head.strip()
def get_deps_repo_versions(layout):
"""Loads the repository version object stored at GO_REPO_VERSIONS.
If no version object exists, an empty dictionary will be returned.
"""
if not os.path.isfile(layout.go_repo_versions_path):
return {}
with open(layout.go_repo_versions_path, 'r') as fd:
return json.load(fd)
def save_deps_repo_versions(layout, v):
"""Records the repository version object, "v", as JSON at GO_REPO_VERSIONS."""
with open(layout.go_repo_versions_path, 'w') as fd:
json.dump(v, fd, indent=2, sort_keys=True)
def install_deps_tools(layout, force):
if not layout.go_install_tools:
return False
# Load the current HEAD for our Go dependency paths.
current_versions = {}
for path in (layout.go_deps_paths or ()):
current_versions[path] = get_git_repository_head(path)
# Only install the tools if our checkout versions have changed.
if not force and get_deps_repo_versions(layout) == current_versions:
return False
# (Re)install all of our Go packages.
LOGGER.info('Installing Go tools: %s', layout.go_install_tools)
env = get_go_environ(layout)
subprocess.check_call([get_go_exe(layout.toolset_root), 'install'] +
list(layout.go_install_tools),
stdout=sys.stderr, stderr=sys.stderr, env=env)
save_deps_repo_versions(layout, current_versions)
return True
def update_vendor_packages(workspace, toolset_root, force=False):
"""Runs deps.py to fetch and install pinned packages.
Returns (bool): True if the dependencies were actually updated, False if they
were already at the correct version.
"""
if not os.path.isfile(os.path.join(workspace, 'deps.lock')):
return False
# We will pass "deps.py" the "--update-out" argument, which will create a
# file at a temporary path if the deps were actually updated. We use this to
# derive our return value.
with temp_dir(workspace) as tdir:
update_out_path = os.path.join(tdir, 'deps_updated.json')
cmd = [
sys.executable, '-u', os.path.join(ROOT, 'go', 'deps.py'),
'--workspace', workspace,
'--goroot', os.path.join(toolset_root, 'go'),
'install',
'--update-out', update_out_path,
]
if force:
cmd.append('--force')
env = os.environ.copy()
env['PATH'] = os.pathsep.join([
os.path.join(ROOT, 'cipd'), env.get('PATH', '')
])
subprocess.check_call(cmd, stdout=sys.stderr, env=env)
return os.path.isfile(update_out_path)
def get_go_environ_diff(layout):
"""Returns what modifications must be applied to the environ to enable Go.
Pure function of 'layout', doesn't depend on current os.environ or state on
disk.
Args:
layout: The Layout to derive the environment from.
Returns:
EnvironDiff.
"""
# Paths to search Go code for. Order is important.
vendor_paths = layout.vendor_paths or ()
all_go_paths = []
all_go_paths.extend(os.path.join(p, '.vendor') for p in vendor_paths)
if layout.go_paths:
all_go_paths.extend(layout.go_paths)
all_go_paths.append(layout.workspace)
# New PATH entries. Order is important.
paths_to_add = [
os.path.join(layout.toolset_root, 'go', 'bin'),
os.path.join(ROOT, 'cipd'),
os.path.join(ROOT, 'cipd', 'bin'),
os.path.join(ROOT, 'luci', 'appengine', 'components', 'tools'),
]
paths_to_add.extend(os.path.join(p, '.vendor', 'bin') for p in vendor_paths)
paths_to_add.append(os.path.join(layout.workspace, 'bin'))
return EnvironDiff(
env={
'GOROOT': os.path.join(layout.toolset_root, 'go'),
'GOBIN': os.path.join(layout.workspace, 'bin'),
'GOPATH': os.pathsep.join(all_go_paths),
# Don't use default cache in '~'.
'GOCACHE': os.path.join(layout.workspace, '.cache'),
# Infra Go workspace is not ready for modules yet, attempting to use
# them will cause pain.
'GOPROXY': 'off',
'GO111MODULE': 'off',
},
env_prefixes={'PATH': paths_to_add},
)
def get_go_environ(layout):
"""Returns a copy of os.environ with mutated GO* environment variables.
This function primarily targets environ on workstations. It assumes
the developer may be constantly switching between infra and infra_internal
go environments and it has some protection against related edge cases.
Args:
layout: The Layout to derive the environment from.
"""
diff = get_go_environ_diff(layout)
env = os.environ.copy()
for k, v in diff.env.items():
if v is not None:
env[k] = v
else:
env.pop(k, None)
path = env['PATH'].split(os.pathsep)
paths_to_add = diff.env_prefixes['PATH']
# Remove preexisting bin/ paths (including .vendor/bin) pointing to infra
# or infra_internal Go workspaces. It's important when switching from
# infra_internal to infra environments: infra_internal bin paths should
# be removed.
def should_keep(p):
if p in paths_to_add:
return False # we'll move this entry to the front below
# TODO(vadimsh): This code knows about gclient checkout layout.
for d in ['infra', 'infra_internal']:
if p.startswith(os.path.join(GCLIENT_ROOT, d, 'go')):
return False
return True
path = filter(should_keep, path)
# Prepend paths_to_add to PATH.
env['PATH'] = os.pathsep.join(paths_to_add + path)
# Add a tag to the prompt
infra_prompt_tag = env.get('INFRA_PROMPT_TAG')
if infra_prompt_tag is None:
infra_prompt_tag = '[cr go] '
if infra_prompt_tag:
prompt = env.get('PS1')
if prompt and infra_prompt_tag not in prompt:
env['PS1'] = infra_prompt_tag + prompt
return env
def get_go_exe(toolset_root):
"""Returns path to go executable."""
return os.path.join(toolset_root, 'go', 'bin', 'go' + EXE_SFX)
def bootstrap(layout, logging_level, args=None):
"""Installs all dependencies in default locations.
Supposed to be called at the beginning of some script (it modifies logger).
Args:
layout: instance of Layout describing what to install and where.
logging_level: logging level of bootstrap process.
args: positional arguments of bootstrap.py (if any).
Raises:
Failure if bootstrap fails.
"""
logging.basicConfig()
LOGGER.setLevel(logging_level)
# One optional positional argument is a path to write JSON with env diff to.
# This is used by recipes which use it in `with api.context(env=...): ...`.
json_output = None
if args is not None:
parser = argparse.ArgumentParser()
parser.add_argument(
'json_output',
nargs='?',
metavar='PATH',
help='Where to write JSON with necessary environ adjustments')
json_output = parser.parse_args(args=args).json_output
# We need to build and run some Go binaries during bootstrap (e.g. glide), so
# make sure cross-compilation mode is disabled during bootstrap. Restore it
# back once bootstrap is finished.
prev_environ = {}
for k in ('GOOS', 'GOARCH', 'GOARM'):
prev_environ[k] = os.environ.pop(k, None)
try:
toolset_updated = ensure_toolset_installed(
layout.toolset_root, TOOLSET_VERSION)
ensure_glide_installed(layout.toolset_root)
vendor_updated = toolset_updated
for p in layout.vendor_paths:
vendor_updated |= update_vendor_packages(
p, layout.toolset_root, force=toolset_updated)
if toolset_updated:
# GOPATH/pkg may have binaries generated with previous version of toolset,
# they may not be compatible and "go build" isn't smart enough to rebuild
# them.
for p in layout.vendor_paths:
remove_directory([p, 'pkg'])
install_deps_tools(layout, vendor_updated)
finally:
# Restore os.environ back. Have to do it key-by-key to actually modify the
# process environment (replacing os.environ object as a whole does nothing).
for k, v in prev_environ.iteritems():
if v is not None:
os.environ[k] = v
output = get_go_environ_diff(layout)._asdict()
output['go_version'] = TOOLSET_VERSION
json_blob = json.dumps(
output,
sort_keys=True,
indent=2,
separators=(',', ': '))
if json_output == '-':
print json_blob
elif json_output:
with open(json_output, 'w') as f:
f.write(json_blob)
def prepare_go_environ():
"""Returns dict with environment variables to set to use Go toolset.
Installs or updates the toolset and vendored dependencies if necessary.
"""
bootstrap(LAYOUT, logging.INFO)
return get_go_environ(LAYOUT)
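# --- Illustrative usage sketch (not part of the original bootstrap.py) ------
# A minimal, hypothetical example of how a caller script could consume
# prepare_go_environ(); this helper is never invoked by this module.
def _example_print_go_version():
  """Runs 'go version' inside the bootstrapped hermetic toolset."""
  env = prepare_go_environ()
  out = subprocess.check_output(
      [get_go_exe(LAYOUT.toolset_root), 'version'], env=env)
  print out.strip()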
def find_executable(name, workspaces):
"""Returns full path to an executable in some bin/ (in GOROOT or GOBIN)."""
basename = name
if EXE_SFX and basename.endswith(EXE_SFX):
basename = basename[:-len(EXE_SFX)]
roots = [os.path.join(LAYOUT.toolset_root, 'go', 'bin')]
for path in workspaces:
roots.extend([
os.path.join(path, '.vendor', 'bin'),
os.path.join(path, 'bin'),
])
for root in roots:
full_path = os.path.join(root, basename + EXE_SFX)
if os.path.exists(full_path):
return full_path
return name
def main(args):
bootstrap(LAYOUT, logging.DEBUG, args)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
Python/Buch_ATBS/Teil_2/Kapitel_13_Arbeiten_mit_Word_und_PDF_Dokumenten/10_kapitel_13_repetitionsfragen.py | Apop85/Scripts | 0 | 6633023 | # 10_kapitel_13_repetitionsfragen.py
import re
max_text_length=70
max_text_delta=20
def output(title, string):
    print('╔'+''.center(max_text_length+8, '═')+'╗')
    print('║ '+title.center(max_text_length+7).upper()+'║')
    print('╠'+''.center(max_text_length+8, '═')+'╣')
    string=string+' '*max_text_length
    search_pattern=re.compile(r'\w+.{'+str(max_text_length-max_text_delta-7)+r','+str(max_text_length-7)+r'}[ |.|,|\n|>|\W]', re.DOTALL)
    results=search_pattern.findall(string)
    for line in results:
        print('║ '+line.strip()+'║'.rjust(max_text_length+8-len(line.strip())))
    print('╚'+''.center(max_text_length+8, '═')+'╝')
    input()
output('Frage 01', 'Der Funktion PyPDF2.PdfFileReader() übergeben sie nicht den Stringwert mit dem Namen der PDF-Datei. Was übergeben sie stattdessen?')
output('Antwort', 'Man übergibt die Variabel welche die geöffnete Datei im binary Modus beinhaltet. Bsp: pdf_file_content = PyPDF2.PdfFileReader(open(pdf_file.pdf, "rb"))')
output('Frage 02', 'In welchen Modi müssen File-Objekte für PdfFileReader() und PdfFileWriter() geöffnet sein?')
output('Antwort', 'Read Binary bzw "rb" Beispiel: open(pdf_file.pdf, "rb") oder im Modus Write-Binary für .PdfFileWriter()')
output('Frage 03', 'Wie rufen sie das Page-Objekt für die Seite 5 von einem PdfFileReader-Objekt ab?')
output('Antwort', 'Man nutzt page_5=pdf_file_content.getPage(4) wobei ein PDF-Dokument immer mit der Seitenzahl 0 beginnt und daher getPage(4) für die 5. Seite verwendet werden muss.')
output('Frage 04', 'In welcher PdfFileReader-Variable ist die Anzahl der Seiten in einem PDF-Dokument gespeichert?')
output('Antwort', 'pdf_file_content.numPages liest die Anzahl Seiten des PDF\'s aus')
output('Frage 05', 'Was müssen sie tun, bevor sie die Page-Objekte von einem PdfFileReader-Objekt abrufen können, dessen PDF mit dem Passwort "swordfish" geschützt ist?')
output('Antwort', 'Mit pdf_file_content.decrypt("swordfish") lässt sich die Datei entschlüsseln. Mit .encrypt(passwort) wieder verschlüsseln')
output('Frage 06', 'Welche Methoden verwenden sie, um eine Seite zu drehen?')
output('Antwort', 'Die Seite lässt sich mit page_5.rotateClockwise(90) drehen.')
output('Frage 07', 'Welche Methode gibt ein Document-Objekt für die Datei demo.docx zurück?')
output('Antwort', 'Mit doc_file=docx.Document("demo.docx") lässt sich demo.docx auslesen und in einer Variabel speichern.')
output('Frage 08', 'Was ist der Unterschied zwischen einem Paragraphen- und einem Run-Objekt?')
output('Antwort', 'Die Paragraphen beinhalten den kompletten Text bis zum nächsten Zeilenumbruch und sind selber wiederum in Runs unterteilt, welche das Aussehen der Textabschnitte bestimmen. Jedes Mal, wenn sich die Formatierung ändert, entsteht ein neuer Run.')
output('Frage 09', 'Wie rufen sie die Liste der Paragraphen-Objekte für ein Dokument ab das in der Variabel "doc" gespeichert ist?')
output('Antwort', 'Die Funktion doc.paragraphs gibt alle Paragraphen-Objekte als Liste aus')
output('Frage 10', 'Was für Objekte verfügen über die Variablen bold, underline, italic, strike und outline?')
output('Antwort', 'Diese Variablen gehören zum Run-Objekt und definieren ob der Text fett, unterstrichen, schräg, durchgestrichen oder outlined ist. Beispiel: run_objekt.italic=True')
output('Frage 11', 'Was ist der Unterschied zwischen den Werten True, False und None für die Variable bold?')
output('Antwort', 'bold=True heisst dass der Text fett geschrieben wird, bold=False heisst er wird nicht Fett dargestellt und bold=None verwendet die Standardwerte des Run-Objekts')
output('Frage 12', 'Wie erstellen sie ein Document-Objekt für ein neues Word_Dokument?')
output('Antwort', 'Ein neues Dokument kann man mit docx.Document() erstellen')
output('Frage 13', 'Wie fügen sie einem Document-Objekt in der Variablen doc einen Absatz mit dem Text "Hello there" hinzu?')
output('Antwort', 'Mit doc.add_paragraph("Hello there") lässt sich ein neuer Abschnitt mit dem entsprechenden Textinhalt erstellen.')
output('Frage 14', 'Welche Integerwerte können sie verwenden, um in einem Word-Dokument Überschriften-Ebenen anzugeben?')
output('Antwort', 'Mit doc_file.add_heading("Header Text", 0-4) lassen sich Header einfügen.')
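# Illustrative sketch (not part of the original file): the PyPDF2 and
# python-docx calls referenced in the answers above, shown as hypothetical code.
#
#   import PyPDF2, docx
#   reader = PyPDF2.PdfFileReader(open('pdf_file.pdf', 'rb'))
#   page_5 = reader.getPage(4)            # pages are zero-indexed
#   page_5.rotateClockwise(90)
#   doc = docx.Document()
#   doc.add_paragraph('Hello there')
#   doc.add_heading('Header Text', 1)     # heading levels 0-4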
aps/tokenizer/__init__.py | ishine/aps | 117 | 6633024 | from .base import Tokenizer
from .subword import SubwordTokenizer
from .word import WordTokenizer, CharTokenizer
|
registration/admin.py | AVS18/Hospital-Management-System | 0 | 6633025 |
from django.contrib import admin
from .models import User
from .models import Profile
from .models import Appointments
from .models import Prescription
from .models import Invoice
class Customize_userreges(admin.ModelAdmin):
    list_display=['first_name','last_name','username','email','profession']
    list_filter=(['profession'])
class Customize_Profile(admin.ModelAdmin):
    list_display=['username','gender','age','aptname','stname','cityname','phone','profession','MedicalHistory']
    list_filter=(['profession','insurance'])
class Customize_Appointments(admin.ModelAdmin):
    list_display=['duser','date','puser','time','status','disease']
    list_filter=(['duser','puser','disease'])
class Customize_Prescription(admin.ModelAdmin):
    list_display=['duser','disease','puser','date','care','medicine']
    list_filter=(['disease','medicine'])
class Customize_Invoice(admin.ModelAdmin):
    list_display=['puser','duser','amount','disease','payment']
    list_filter=(['puser','duser','disease','payment'])
admin.site.register(User,Customize_userreges)
admin.site.register(Profile,Customize_Profile)
admin.site.register(Appointments,Customize_Appointments)
admin.site.register(Prescription,Customize_Prescription)
admin.site.register(Invoice,Customize_Invoice)
|
polybar/.config/polybar/taskwarrior/rofi.py | stanham33/dotfiles | 0 | 6633026 |
#
# python-rofi
#
# The MIT License
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import atexit
from decimal import Decimal, InvalidOperation
import re
import signal
import subprocess
class Rofi(object):
"""Class to facilitate making simple GUIs with Rofi.
Rofi is a popup window system with minimal dependencies (xlib and pango).
It was designed as a window switcher. Its basic operation is to display a
list of options and let the user pick one.
This class provides a set of methods to make simple GUIs with Rofi. It does
this by using the subprocess module to call Rofi externally. Many of the
methods are blocking.
Some strings can contain Pango markup for additional formatting (those that
can are noted as such in the docstrings). Any text in these strings *must*
be escaped before calling Rofi. The class method Rofi.escape() performs
this escaping for you. Make sure you call this on the text prior to adding
Pango markup, otherwise the markup will be escaped and displayed to the
user. See https://developer.gnome.org/pango/stable/PangoMarkupFormat.html
for available markup.
"""
def __init__(self, lines=None, fixed_lines=None, width=None,
fullscreen=None, location=None,
exit_hotkeys=('Alt+F4', 'Control+q')):
"""
Parameters
----------
exit_hotkeys: tuple of strings
Hotkeys to use to exit the application. These will be automatically
set and handled in any method which takes hotkey arguments. If one
of these hotkeys is pressed, a SystemExit will be raised to perform
the exit.
The following parameters set default values for various layout options,
and can be overwritten in any display method. A value of None means
use the system default, which may be set by a configuration file or
fall back to the compile-time default. See the Rofi documentation for
full details on what the values mean.
lines: positive integer
The maximum number of lines to show before scrolling.
fixed_lines: positive integer
Keep a fixed number of lines visible.
width: real
If positive but not more than 100, this is the percentage of the
screen's width the window takes up. If greater than 100, it is the
width in pixels. If negative, it estimates the width required for
the corresponding number of characters, i.e., -30 would set the
width so ~30 characters per row would show.
fullscreen: boolean
If True, use the full height and width of the screen.
location: integer
The position of the window on the screen.
"""
# The Popen class returned for any non-blocking windows.
self._process = None
# Save parameters.
self.lines = lines
self.fixed_lines = fixed_lines
self.width = width
self.fullscreen = fullscreen
self.location = location
self.exit_hotkeys = exit_hotkeys
# Don't want a window left on the screen if we exit unexpectedly
# (e.g., an unhandled exception).
atexit.register(self.close)
@classmethod
def escape(self, string):
"""Escape a string for Pango markup.
Parameters
----------
string:
A piece of text to escape.
Returns
-------
The text, safe for use in with Pango markup.
"""
# Escape ampersands first, then other entities. Since argument is a
# dictionary, we can't guarantee order of translations and so doing it
# in one go would risk the ampersands in other translations being
# escaped again.
return string.translate(
    {38: '&amp;'}
).translate({
    34: '&quot;',
    39: '&#39;',
    60: '&lt;',
    62: '&gt;'
})
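# Example (sketch, not from the original file): escape user text before
# wrapping it in Pango markup so '&', '<' and '>' are not parsed as markup.
#
#   safe = Rofi.escape('Fish & Chips <deluxe>')
#   # safe == 'Fish &amp; Chips &lt;deluxe&gt;'
#   message = '<b>{0:s}</b>'.format(safe)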
def close(self):
"""Close any open window.
Note that this only works with non-blocking methods.
"""
if self._process:
# Be nice first.
self._process.send_signal(signal.SIGINT)
# If it doesn't close itself promptly, be brutal.
try:
self._process.wait(timeout=1)
except subprocess.TimeoutExpired:
self._process.send_signal(signal.SIGKILL)
# Clean up.
self._process = None
def _common_args(self, allow_fullscreen=True, **kwargs):
args = []
# Number of lines.
lines = kwargs.get('lines', self.lines)
if lines:
args.extend(['-lines', str(lines)])
fixed_lines = kwargs.get('fixed_lines', self.fixed_lines)
if fixed_lines:
args.extend(['-fixed-num-lines', str(fixed_lines)])
# Width.
width = kwargs.get('width', self.width)
if width is not None:
args.extend(['-width', str(width)])
# Fullscreen mode?
fullscreen = kwargs.get('fullscreen', self.fullscreen)
if allow_fullscreen and fullscreen:
args.append('-fullscreen')
# Location on screen.
location = kwargs.get('location', self.location)
if location is not None:
args.extend(['-location', str(location)])
# Done.
return args
def error(self, message, **kwargs):
"""Show an error window.
This method blocks until the user presses a key.
Fullscreen mode is not supported for error windows, and if specified
will be ignored.
Parameters
----------
message: string
Error message to show.
"""
# Generate arguments list.
args = ['rofi', '-e', message]
args.extend(self._common_args(allow_fullscreen=False, **kwargs))
# Close any existing window and show the error.
self.close()
subprocess.run(args)
def status(self, message, **kwargs):
"""Show a status message.
This method is non-blocking, and intended to give a status update to
the user while something is happening in the background.
To close the window, either call the close() method or use any of the
display methods to replace it with a different window.
Fullscreen mode is not supported for status messages and if specified
will be ignored.
Parameters
----------
message: string
Progress message to show.
"""
# Generate arguments list.
args = ['rofi', '-e', message]
args.extend(self._common_args(allow_fullscreen=False, **kwargs))
# Close any existing window, show the error, and return immediately.
self.close()
self._process = subprocess.Popen(args)
def select(self, prompt, options, message="", select=None, **kwargs):
"""Show a list of options and return user selection.
This method blocks until the user makes their choice.
Parameters
----------
prompt: string
The prompt telling the user what they are selecting.
options: list of strings
The options they can choose from. Any newline characters are
replaced with spaces.
message: string, optional
Message to show between the prompt and the options. This can
contain Pango markup, and any text content should be escaped.
select: integer, optional
Set which option is initially selected.
keyN: tuple (string, string); optional
Custom key bindings where N is one or greater. The first entry in
the tuple should be a string defining the key, e.g., "Alt+x" or
"Delete". Note that letter keys should be lowercase ie.e., Alt+a
not Alt+A.
The second entry should be a short string stating the action the
key will take. This is displayed to the user at the top of the
dialog. If None or an empty string, it is not displayed (but the
binding is still set).
By default, key1 through key9 are set to ("Alt+1", None) through
("Alt+9", None) respectively.
Returns
-------
tuple (index, key)
The index of the option the user selected, or -1 if they cancelled
the dialog.
Key indicates which key was pressed, with 0 being 'OK' (generally
Enter), -1 being 'Cancel' (generally escape), and N being custom
key N.
"""
# Replace newlines and turn the options into a single string.
optionstr = '\n'.join(option.replace('\n', ' ') for option in options)
# Set up arguments.
args = ['rofi', '-dmenu', '-p', prompt, '-format', 'i']
if select is not None:
args.extend(['-selected-row', str(select)])
# Key bindings to display.
display_bindings = []
# Configure the key bindings.
user_keys = set()
for k, v in kwargs.items():
match = re.fullmatch(r'key(\d+)', k)
if match:
key, action = v
user_keys.add(int(match.group(1)))
args.extend(['-kb-custom-{0:s}'.format(match.group(1)), key])
if action:
display_bindings.append("<b>{0:s}</b>: {1:s}".format(key, action))
# And the global exit bindings.
exit_keys = set()
next_key = 10
for key in self.exit_hotkeys:
while next_key in user_keys:
next_key += 1
exit_keys.add(next_key)
args.extend(['-kb-custom-{0:d}'.format(next_key), key])
next_key += 1
# Add any displayed key bindings to the message.
message = message or ""
if display_bindings:
message += "\n" + " ".join(display_bindings)
message = message.strip()
# If we have a message, add it to the arguments.
if message:
args.extend(['-mesg', message])
# Add in common arguments.
args.extend(self._common_args(**kwargs))
# Run the dialog.
self.close()
results = subprocess.run(args, input=optionstr, stdout=subprocess.PIPE,
universal_newlines=True)
# Figure out which option was selected.
stdout = results.stdout.strip()
index = int(stdout) if stdout else -1
# And map the return code to a key.
if results.returncode == 0:
key = 0
elif results.returncode == 1:
key = -1
elif results.returncode > 9:
key = results.returncode - 9
if key in exit_keys:
raise SystemExit()
else:
self.exit_with_error("Unexpected rofi returncode {0:d}.".format(results.returncode))
# And return.
return index, key
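# Illustrative sketch (not part of the original file): using select() with a
# custom hotkey; 'tasks' and 'delete_task' are hypothetical.
#
#   index, key = r.select('Task', tasks, key1=('Alt+d', 'Delete task'))
#   if key == 1 and index >= 0:   # the custom key1 binding was pressed
#       delete_task(tasks[index])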
def _generic_entry(self, prompt, validator, message=None, **kwargs):
"""Internal helper method for entry methods.
Parameters
----------
prompt: string
Text prompt for the entry.
validator: function
Function which takes the string the user entered and returns a
tuple (value, error). Value is the entered value converted to the
appropriate Python type ready for returning, and error is either a
string if the entered text was invalid, or None if it was valid.
message: string
Optional message to display under the entry.
Returns
-------
The value returned by the validator, or None if the dialog was
cancelled.
"""
error = ""
# Keep going until we get something valid.
while True:
args = ['rofi', '-dmenu', '-p', prompt, '-format', 's']
# Add any error to the given message.
msg = message or ""
if error:
msg = '<span color="#FF0000" font_weight="bold">{0:s}</span>\n{1:s}'.format(error, msg)
msg = msg.rstrip('\n')
# If there is actually a message to show.
if msg:
args.extend(['-mesg', msg])
# Add in common arguments.
args.extend(self._common_args(**kwargs))
# Run it.
self.close()
results = subprocess.run(args, input="", stdout=subprocess.PIPE,
universal_newlines=True)
# Was the dialog cancelled?
if results.returncode == 1:
return None
# Get rid of the trailing newline and check its validity.
value, error = validator(results.stdout.rstrip('\n'))
if not error:
return value
def text_entry(self, prompt, message=None, allow_blank=False, strip=True):
"""Prompt the user to enter a piece of text.
Parameters
----------
prompt: string
Prompt to display to the user.
message: string, optional
Message to display under the entry line.
allow_blank: Boolean
Whether to allow blank entries.
strip: Boolean
Whether to strip leading and trailing whitespace from the entered
value.
Returns
-------
string, or None if the dialog was cancelled.
"""
def text_validator(text):
if strip:
text = text.strip()
if not allow_blank:
if not text:
return None, "A value is required."
return text, None
return self._generic_entry(prompt, text_validator, message)
def integer_entry(self, prompt, message=None, min=None, max=None):
"""Prompt the user to enter an integer.
Parameters
----------
prompt: string
Prompt to display to the user.
message: string, optional
Message to display under the entry line.
min, max: integer, optional
Minimum and maximum values to allow. If None, no limit is imposed.
Returns
-------
integer, or None if the dialog is cancelled.
"""
# Sanity check.
if (min is not None) and (max is not None) and not (max > min):
raise ValueError("Maximum limit has to be more than the minimum limit.")
def integer_validator(text):
error = None
# Attempt to convert to integer.
try:
value = int(text)
except ValueError:
return None, "Please enter an integer value."
# Check its within limits.
if (min is not None) and (value < min):
return None, "The minimum allowable value is {0:d}.".format(min)
if (max is not None) and (value > max):
return None, "The maximum allowable value is {0:d}.".format(max)
return value, None
return self._generic_entry(prompt, integer_validator, message)
def float_entry(self, prompt, message=None, min=None, max=None):
"""Prompt the user to enter a floating point number.
Parameters
----------
prompt: string
Prompt to display to the user.
message: string, optional
Message to display under the entry line.
min, max: float, optional
Minimum and maximum values to allow. If None, no limit is imposed.
Returns
-------
float, or None if the dialog is cancelled.
"""
# Sanity check.
if (min is not None) and (max is not None) and not (max > min):
raise ValueError("Maximum limit has to be more than the minimum limit.")
def float_validator(text):
error = None
# Attempt to convert to float.
try:
value = float(text)
except ValueError:
return None, "Please enter a floating point value."
# Check its within limits.
if (min is not None) and (value < min):
return None, "The minimum allowable value is {0}.".format(min)
if (max is not None) and (value > max):
return None, "The maximum allowable value is {0}.".format(max)
return value, None
return self._generic_entry(prompt, float_validator, message)
def decimal_entry(self, prompt, message=None, min=None, max=None):
"""Prompt the user to enter a decimal number.
Parameters
----------
prompt: string
Prompt to display to the user.
message: string, optional
Message to display under the entry line.
min, max: Decimal, optional
Minimum and maximum values to allow. If None, no limit is imposed.
Returns
-------
Decimal, or None if the dialog is cancelled.
"""
# Sanity check.
if (min is not None) and (max is not None) and not (max > min):
raise ValueError("Maximum limit has to be more than the minimum limit.")
def decimal_validator(text):
error = None
# Attempt to convert to decimal.
try:
value = Decimal(text)
except InvalidOperation:
return None, "Please enter a decimal value."
# Check its within limits.
if (min is not None) and (value < min):
return None, "The minimum allowable value is {0}.".format(min)
if (max is not None) and (value > max):
return None, "The maximum allowable value is {0}.".format(max)
return value, None
return self._generic_entry(prompt, decimal_validator, message)
def exit_with_error(self, error):
"""Report an error and exit.
This raises a SystemExit exception to ask the interpreter to quit.
Parameters
----------
error: string
The error to report before quitting.
"""
self.error(error)
raise SystemExit(error)
decompile_all_EO3_ai.py | LumenTheFairy/Etrian-Odyssey-Data-Interpreter | 4 | 6633027 | <gh_stars>1-10
#!/usr/bin/python
# coding: utf-8
# Decompiles all AI files, naming, to the extent possible, skills
# and the entities that use each AI file (if any)
# assumes directory structure:
# ./EO3/
# AI/
# BtlBSTScrFileTable.tbl
# BtlNPCScrFileTable.tbl
# BtlScrFileTable.tbl
# *.bf
# Skills/
# enemyskillnametable.tbl
# playerskillnametable.tbl
# ...
# Enemy/
# enemynametable.tbl
# ...
# ...
# out_EO3/
# AI/
# decompiled/
# enemy/
# ally/
# summon/
# ...
# ...
# written by TheOnlyOne (@modest_ralts)
import argparse
from sys import stderr
import os
import unpack_EO_name_table
import unpack_ai_proc_list
import unpack_ai
import decompile_ai
def parseArguments():
# Create argument parser
parser = argparse.ArgumentParser(description="Decompiles all Etrian Odyssey 3 AI files, naming skills and entities to the extent possible.")
parser.add_argument("--fully_optimize", action="store_true", help="all optimization passes will be performed on the code; specific optimization flags will be ignored")
parser.add_argument("--flatten_conditionals", action="store_true", help="(if . else if . else .) will be converted to (if . elif . else.) when permissable to reduce the nesting depth and resulting indentation of code")
parser.add_argument("--flatten_elses", action="store_true", help="(if t return else f ) will be converted to (if t return f) when permissable to reduce the nesting depth and resulting indentation of code")
parser.add_argument("--constant_folding", action="store_true", help="any arithmetic containing only constants will be replaced with the value of that expression")
parser.add_argument("--simplify_conditions", action="store_true", help="boolean conditions will be simplified when it is permissable; see docs/ai_notes.txt for some warnings about this flag")
# Print version
parser.add_argument("--version", action="version", version='%(prog)s - Version 1.0')
# Parse arguments
args = parser.parse_args()
return args
if __name__ == '__main__':
# Parse the arguments
args = parseArguments()
# Build the enemy name table
scr_names = unpack_EO_name_table.EO_name_table()
scr_names.build_from_file("EO3/Enemy/enemynametable.tbl", 2, False)
# Build the enemy skill name table
scr_skill_names = unpack_EO_name_table.EO_name_table()
scr_skill_names.build_from_file("EO3/Skill/enemyskillnametable.tbl", 2, False)
# Build the player skill name table
scrn_skill_names = unpack_EO_name_table.EO_name_table()
scrn_skill_names.build_from_file("EO3/Skill/playerskillnametable.tbl", 2, False)
# Build the procedure name list for enemies
scr_proc_list = unpack_ai_proc_list.get_procedure_names("EO3/AI/BtlScrFileTable.tbl", scr_names.size)
# holds all info in, and determined about a single AI file
class AI_Info():
# return a string with the full output destination, including path and filename
def get_full_output_name(self):
# common directory
output = "out_EO3/AI/decompiled/"
# subdirectory based on type
if self.type == "scr":
output += "enemy/"
elif self.type == "scrn":
output += "ally/"
elif self.type == "scrb":
output += "summon/"
# name used is just the first in the possible name list, or the original filename if there is none
if self.possible_names:
output += self.possible_names[0].replace(' ', '_')
else:
output += self.filename[:-3]
# add a version number if necessary
if self.version > 0:
output += "_" + str(self.version)
# extension
output += ".txt"
return output
# computes everything about the ai from its file
def __init__(self, subdir, filename):
# name analysis
self.filename = filename
name_info = filename[:-3].split('_', 2) # 'AI_scr?_name.bf'
self.type = name_info[1]
listed_proc_name = '_'.join(name_info[1:])
self.possible_names = []
for idx, proc_name in enumerate(scr_proc_list):
if proc_name == listed_proc_name:
self.possible_names.append( scr_names.names[idx] )
#if self.type == "scr" and not self.possible_names:
# print "No possible name found: " + self.filename
# TODO: repeat this loop for sea allies and summons once a name list is found
# when there are multiple enemies with the same name, use a non-zero version to distinguish them
self.version = 0
self.flow = unpack_ai.Flow_File(os.path.join(subdir, filename))
self.basic_blocks, self.proc_info, self.special_labels = decompile_ai.abstract_flow(self.flow)
self.abst = decompile_ai.ABST(self.basic_blocks, self.proc_info, self.special_labels, False)
if args.fully_optimize:
self.abst.optimize_abst()
else:
self.abst.optimize_abst(args.flatten_conditionals, args.flatten_elses, args.constant_folding, args.simplify_conditions)
decompile_ai.set_game_specific_values("EO3")
ai_info = []
for subdir, dirs, files in os.walk('EO3/AI/'):
for file in files:
if file.endswith('.bf'):
# create the raw disassembly flow
ai_info.append( AI_Info(subdir, file) )
# adds versions to AIs with the same first possible name
# note that this is not particularly efficient
for out_idx, out_info in enumerate(ai_info):
matches = [out_info]
for in_idx, in_info in enumerate(ai_info):
if out_idx != in_idx and out_info.possible_names and in_info.possible_names:
if out_info.possible_names[0] == in_info.possible_names[0]:
matches.append(in_info)
if len(matches) > 1:
matches.sort(key=lambda i : i.filename)
for in_idx, in_info in enumerate(matches):
in_info.version = in_idx + 1
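# NOTE (hedged alternative, not executed): the quadratic matching above could be
# replaced by a single pass that groups entries by their first possible name:
#
#   from collections import defaultdict
#   groups = defaultdict(list)
#   for info in ai_info:
#       if info.possible_names:
#           groups[info.possible_names[0]].append(info)
#   for group in groups.values():
#       if len(group) > 1:
#           for version, info in enumerate(sorted(group, key=lambda i: i.filename), 1):
#               info.version = version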
for info in ai_info:
# header info
output = []
if info.possible_names:
output += ["Name: " + info.possible_names[0] ]
if info.version > 0:
output[0] += " (version " + str(info.version) + ")"
output += ["Original filename: " + info.filename]
output += [""]
if info.type == "scr":
func_display = decompile_ai.get_enemy_function_formater(info.abst, scr_names.names, scr_skill_names.names)
output += [info.abst.display_decompilation(func_display)]
else:
output += [info.abst.display_decompilation()]
# Write decompilation to a file
with open(info.get_full_output_name(), "w") as f:
f.write( "\n".join(output) )
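# Example invocations (hedged; they assume the EO3/ and out_EO3/ directory
# layout described at the top of this script already exists):
#
#   python decompile_all_EO3_ai.py --fully_optimize
#   python decompile_all_EO3_ai.py --flatten_conditionals --constant_folding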
pre-processing/prepare_data.py | ocastx/deepspeech-german | 0 | 6633028 | <reponame>ocastx/deepspeech-german
#! /usr/bin/env python
"""
1. Load all corpora where a path is given.
2. Clean transcriptions.
3. Merge all corpora
4. Create Train/Dev/Test splits
5. Export for DeepSpeech
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, os.path.pardir)))
import argparse
import audiomate
from audiomate.corpus import io
from audiomate.corpus import subset
import text_cleaning
def clean_transcriptions(corpus):
for utterance in corpus.utterances.values():
ll = utterance.label_lists[audiomate.corpus.LL_WORD_TRANSCRIPT]
for label in ll:
label.value = text_cleaning.clean_sentence(label.value)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare data for training.')
parser.add_argument('target_path', type=str)
parser.add_argument('--tuda', type=str)
parser.add_argument('--voxforge', type=str)
parser.add_argument('--swc', type=str)
parser.add_argument('--mailabs', type=str)
parser.add_argument('--cv', type=str)
args = parser.parse_args()
tuda_path = args.tuda
voxforge_path = args.voxforge
swc_path = args.swc
mailabs_path = args.mailabs
cv_path = args.cv
corpora = []
if tuda_path is not None:
tuda_corpus = audiomate.Corpus.load(tuda_path, reader='tuda')
corpora.append(tuda_corpus)
if voxforge_path is not None:
voxforge_corpus = audiomate.Corpus.load(
voxforge_path, reader='voxforge')
corpora.append(voxforge_corpus)
if swc_path is not None:
swc_corpus = audiomate.Corpus.load(swc_path, reader='kaldi')
corpora.append(swc_corpus)
if mailabs_path is not None:
mailabs_corpus = audiomate.Corpus.load(mailabs_path, reader='mailabs')
corpora.append(mailabs_corpus)
if cv_path is not None:
cv_corpus = audiomate.Corpus.load(cv_path, reader='common-voice')
corpora.append(cv_corpus)
if len(corpora) <= 0:
raise ValueError('No Corpus given!')
merged_corpus = audiomate.Corpus.merge_corpora(corpora)
clean_transcriptions(merged_corpus)
splitter = subset.Splitter(merged_corpus, random_seed=38)
splits = splitter.split_by_length_of_utterances(
{'train': 0.7, 'dev': 0.15, 'test': 0.15}, separate_issuers=True)
merged_corpus.import_subview('train', splits['train'])
merged_corpus.import_subview('dev', splits['dev'])
merged_corpus.import_subview('test', splits['test'])
deepspeech_writer = io.MozillaDeepSpeechWriter()
deepspeech_writer.save(merged_corpus, args.target_path)
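# Example invocation (hedged; the corpus paths below are placeholders for
# wherever the downloaded corpora actually live):
#
#   python prepare_data.py /data/deepspeech_german \
#       --tuda /data/tuda --voxforge /data/voxforge --cv /data/common_voice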
api/urls.py | prakash3720/django-rest | 0 | 6633029 | <filename>api/urls.py
from django.urls import path,include
from rest_framework.routers import DefaultRouter
from api import views
router=DefaultRouter()
router.register('profile',views.UserProfileViewSet)
router.register('todo',views.UserProfileTodoViewSet)
urlpatterns=[
path('login/',views.UserLoginApiView.as_view()),
path('',include(router.urls))
]
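# Routes generated by the registrations above (hedged summary of DRF's
# DefaultRouter behaviour):
#   /profile/ and /profile/{pk}/ -> UserProfileViewSet
#   /todo/ and /todo/{pk}/       -> UserProfileTodoViewSet
#   /login/                      -> UserLoginApiView
# plus the browsable API root at /.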
agregator/mainpage/models.py | tarasen1/Django-Agregator-Site | 0 | 6633030 | <filename>agregator/mainpage/models.py
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.contrib.auth.models import User
class Room(models.Model):
class Meta():
db_table = 'rooms'
def __str__(self):
return self.room_name
room_user = models.ForeignKey(User)
room_name = models.CharField(max_length=300)
room_descpription = models.TextField()
room_photo = models.ImageField(upload_to='mainImages')
room_city = models.CharField(max_length=40)
room_adress = models.CharField(max_length=100)
square = models.IntegerField(validators=[MinValueValidator(0)])
room_equipment = models.TextField(max_length=1000)
room_accesability = models.BooleanField()
room_light = models.IntegerField(validators=[MaxValueValidator(100), MinValueValidator(1)])
class Comment(models.Model):
class Meta():
db_table = 'comments'
comment_text = models.TextField()
comment_like = models.IntegerField(default = 0)
comment_article = models.ForeignKey(Room)
comment_user = models.ForeignKey(User)
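# Example queries against these models (hedged sketch, e.g. from a view or the
# Django shell; the filter values and `some_user` are placeholders):
#
#   rooms = Room.objects.filter(room_city='Kyiv', room_accesability=True)
#   Comment.objects.create(comment_text='Nice space', comment_article=rooms[0],
#                          comment_user=some_user)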
src/tratamientos/migrations/0003_auto_20160515_2028.py | mava-ar/sgk | 0 | 6633031 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-15 23:28
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('tratamientos', '0002_auto_20160510_0022'),
]
operations = [
migrations.AddField(
model_name='sesion',
name='comienzo_el',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 5, 15, 23, 28, 35, 95147, tzinfo=utc), verbose_name='fecha y hora de comienzo de sesión'),
preserve_default=False,
),
migrations.AddField(
model_name='sesion',
name='fin_el',
field=models.DateTimeField(null=True, verbose_name='fecha y hora de fin de sesión'),
),
]
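# For reference (hedged reconstruction): this auto-generated migration matches
# roughly the following additions to the Sesion model in tratamientos/models.py;
# the one-off datetime default above was only needed to back-fill existing rows.
#
#   comienzo_el = models.DateTimeField('fecha y hora de comienzo de sesión',
#                                      auto_now_add=True)
#   fin_el = models.DateTimeField('fecha y hora de fin de sesión', null=True)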
pcp_sdn_source/pcp_sdn/dphelper.py | kamilburda/pcp-sdn | 2 | 6633032 | """
This module contains datapath-related functions for easier management.
"""
from collections import OrderedDict
#===============================================================================
def get_mac_addr_from_datapath(datapath):
"""
Return the MAC address from the datapath ID.
According to OpenFlow switch specification, the lower 48 bits of the datapath
ID contain the MAC address.
"""
mac_addr_int = datapath.id & 0x0000ffffffffffff
mac_addr = format(mac_addr_int, '02x')
return ':'.join(mac_addr[i:i+2] for i in range(0, 12, 2))
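# Rough check (hedged sketch; _FakeDatapath is illustrative only):
#
#   class _FakeDatapath(object):
#       id = 0x0000112233445566
#   get_mac_addr_from_datapath(_FakeDatapath())  # -> '11:22:33:44:55:66'
#
# Note that format(..., '02x') does not zero-pad to 12 digits, so datapath IDs
# whose embedded MAC begins with 0x00 octets would come out misaligned.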
#===============================================================================
def add_flow_entry(datapath, match, actions, instructions=None, **kwargs):
"""
Add a flow table entry.
Use `**kwargs` to specify additional keyword arguments. Use the same keyword
arguments as in `OFPFlowMod`.
If `instructions` is None, install an instruction that applies `actions` immediately.
If `instructions` is not None, `actions` is ignored.
"""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
if instructions is None:
instructions = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
message = parser.OFPFlowMod(datapath, command=ofproto.OFPFC_ADD, match=match,
instructions=instructions, **kwargs)
datapath.send_msg(message)
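# Example usage (hedged sketch; `datapath` and `parser` come from a Ryu event
# handler, as in the other helpers in this module):
#
#   match = parser.OFPMatch(in_port=1, eth_dst='aa:bb:cc:dd:ee:ff')
#   actions = [parser.OFPActionOutput(2)]
#   add_flow_entry(datapath, match, actions, priority=10, table_id=0)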
def remove_flow_entry(datapath, match, **kwargs):
"""
Remove a flow entry.
"""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
message = parser.OFPFlowMod(datapath, command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match, **kwargs)
datapath.send_msg(message)
def add_instruction_goto_next_table(datapath, table_id, next_table_id, match=None, **kwargs):
"""
Add an instruction in a flow table to go to another table.
If `match` is None, match all packets.
If 'priority' is not specified in `**kwargs`, it defaults to 0 (i.e. the
lowest priority).
"""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
if match is None:
# Match all
match = parser.OFPMatch()
if 'priority' not in kwargs:
kwargs['priority'] = 0
instructions = [parser.OFPInstructionGotoTable(next_table_id)]
message = parser.OFPFlowMod(datapath, table_id=table_id, command=ofproto.OFPFC_ADD,
match=match, instructions=instructions, **kwargs)
datapath.send_msg(message)
#===============================================================================
def send_packet(forwarder, packet_, out_port=None):
"""
Send the packet out the port on the specified forwarder.
If `out_port` is None, process the packet in the flow tables of the forwarder
(i.e. the `OFPP_TABLE` port is used).
"""
ofproto = forwarder.ofproto
parser = forwarder.ofproto_parser
if out_port is None:
out_port = ofproto.OFPP_TABLE
packet_.serialize()
actions = [parser.OFPActionOutput(out_port)]
packet_to_send = parser.OFPPacketOut(forwarder, buffer_id=ofproto.OFP_NO_BUFFER,
in_port=ofproto.OFPP_CONTROLLER, actions=actions, data=packet_.data)
forwarder.send_msg(packet_to_send)
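# Example usage (hedged sketch; it builds a bare Ethernet frame with Ryu's
# packet library, to which higher-layer protocols would normally be added;
# passing no out_port processes the frame through the forwarder's flow tables):
#
#   from ryu.lib.packet import packet, ethernet, ether_types
#   pkt = packet.Packet()
#   pkt.add_protocol(ethernet.ethernet(dst='ff:ff:ff:ff:ff:ff',
#                                      src='aa:bb:cc:dd:ee:ff',
#                                      ethertype=ether_types.ETH_TYPE_ARP))
#   send_packet(forwarder, pkt)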
#===============================================================================
def clear_datapath(datapath):
ofproto = datapath.ofproto
request = datapath.ofproto_parser.OFPFlowMod(
datapath=datapath, command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY)
datapath.send_msg(request)
#===============================================================================
class FlowTableHelper(object):
"""
This class stores flow tables by their names and automatically assigns table
IDs to them sequentially.
"""
def __init__(self, flow_table_names, starting_index=0):
"""
Parameters:
* `flow_table_names` - A list of flow table names.
"""
self._flow_tables = OrderedDict( (key, index + starting_index) for index, key in enumerate(flow_table_names) )
self._flow_table_ids = self._flow_tables.values()
self._flow_table_ids_and_indexes = { table_id: index for index, table_id in enumerate(self._flow_table_ids) }
def __getitem__(self, table_name):
"""
Return the ID of the flow table specified by its name.
"""
return self._flow_tables[table_name]
def __setitem__(self, table_name, table_id):
"""
If the `table_name` is not defined, create a new flow table with the table
ID. If `table_id` is already used, `table_name` is an alias to the table ID.
"""
#FIXME: Need to update `self._flow_table_ids` and `self._flow_table_ids_and_indexes`
# so that `next_table_id` still works properly.
self._flow_tables[table_name] = table_id
def next_table_id(self, table_name_or_id):
"""
Return the next table ID from the table specified by its name or its ID.
"""
try:
# If no exception is raised, table name was passed
table_id = self._flow_tables[table_name_or_id]
except KeyError:
# Table ID was passed
table_id = table_name_or_id
if table_id not in self._flow_table_ids_and_indexes:
raise ValueError("invalid table ID")
if table_id >= self._flow_table_ids[-1]:
raise ValueError("no next table exists")
index = self._flow_table_ids_and_indexes[table_id]
return self._flow_table_ids[index + 1]
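# Hedged usage sketch for FlowTableHelper (not part of the original module);
# note that next_table_id assumes a list-like .values(), i.e. Python 2.
if __name__ == '__main__':
    tables = FlowTableHelper(['acl', 'mac_learning', 'forwarding'])
    print(tables['acl'])                 # 0
    print(tables['forwarding'])          # 2
    print(tables.next_table_id('acl'))   # 1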
ndispers/media/crystals/_betaBBO_Tamosauskas2018.py | akihiko-shimura/ndispers | 4 | 6633033 | <reponame>akihiko-shimura/ndispers<gh_stars>1-10
import sympy
from ndispers._baseclass import Medium, wl, phi, theta, T, pi
from ndispers.helper import vars2
class BetaBBO(Medium):
"""
beta-BBO (beta-Ba B_2 O_4) crystal
- Point group : 3m
- Crystal system : Trigonal
- Dielectic principal axis, z // c-axis (x, y-axes are arbitrary)
- Negative uniaxial, with optic axis parallel to z-axis
- Tranparency range : 0.19 to 2.6 um
Dispersion formula for refractive index
---------------------------------------
n(wl_um) = sqrt(1 + B1_i*wl**2/(wl**2 - C1_i) + B2_i*wl**2/(wl**2 - C2_i) + B3_i*wl**2/(wl**2 - C3_i)) for i = o, e
Validity range
--------------
0.188 - 5.2 um
Ref
---
Tamošauskas, Gintaras, et al. "Transmittance and phase matching of BBO crystal in the 3-5 μm range and its application for the characterization of mid-infrared laser pulses." Optical Materials Express 8.6 (2018): 1410-1418.
dn/dT from Nikogosyan, <NAME>. "Beta barium borate (BBO)." Applied Physics A 52.6 (1991): 359-368.
Example
-------
>>> bbo = ndispers.media.crystals.BetaBBO_Tamosauskas2018()
>>> bbo.n(0.6, 0, 40, pol='o') # args: (wl_um, theta_rad, T_degC, pol)
>>> bbo.n(0.6, 0.5*pi, 40, pol='e') # along z-axis, it is pure e-ray.
>>> bbo.n(0.6, 0*pi, 40, pol='e') # for theta = 0 rad, it corresponds to o-ray.
>>> bbo.GVD(0.6, 0.3*pi, 40, pol='e')
>>> bbo.pmAngles_sfg(1.064, 1.064, 40, deg=True)
{'ooe': {'theta': [22.895], 'phi': None},
'eeo': {'theta': [], 'phi': None},
'oee': {'theta': [32.575], 'phi': None},
'eoe': {'theta': [32.575], 'phi': None},
'eoo': {'theta': [], 'phi': None},
'oeo': {'theta': [], 'phi': None}}
"""
__slots__ = ["_BetaBBO__plane", "_BetaBBO__theta_rad", "_BetaBBO__phi_rad",
"_B1_o", "_C1_o", "_B2_o", "_C2_o", "_B3_o", "_C3_o",
"_B1_e", "_C1_e", "_B2_e", "_C2_e", "_B3_e", "_C3_e",
"_dndT_o", "_dndT_e"]
def __init__(self):
super().__init__()
self._BetaBBO__plane = 'arb'
self._BetaBBO__theta_rad = 'var'
self._BetaBBO__phi_rad = 'arb'
""" Constants of dispersion formula """
# For ordinary ray
self._B1_o = 0.90291
self._C1_o = 0.003926
self._B2_o = 0.83155
self._C2_o = 0.018786
self._B3_o = 0.76536
self._C3_o = 60.01
# For extraordinary ray
self._B1_e = 1.151075
self._C1_e = 0.007142
self._B2_e = 0.21803
self._C2_e = 0.02259
self._B3_e = 0.656
self._C3_e = 263
# dn/dT
self._dndT_o = -16.6e-6 #/degC
self._dndT_e = -9.3e-6 #/degC
@property
def plane(self):
return self._BetaBBO__plane
@property
def theta_rad(self):
return self._BetaBBO__theta_rad
@property
def phi_rad(self):
return self._BetaBBO__phi_rad
@property
def constants(self):
print(vars2(self))
@property
def symbols(self):
return [wl, theta, phi, T]
@property
def constants(self):
msg = ["B1_o = %g" % self._B1_o]
msg += ["C1_o = %g" % self._C1_o]
msg += ["B2_o = %g" % self._B2_o]
msg += ["C2_o = %g" % self._C2_o]
msg += ["B3_o = %g" % self._B3_o]
msg += ["C3_o = %g" % self._C3_o]
msg += ["B1_e = %g" % self._B1_e]
msg += ["C1_e = %g" % self._C1_e]
msg += ["B2_e = %g" % self._B2_e]
msg += ["C2_e = %g" % self._C2_e]
msg += ["B3_e = %g" % self._B3_e]
msg += ["C3_e = %g" % self._C3_e]
msg += ["dn_o/dT = %g" % self._dndT_o]
msg += ["dn_e/dT = %g" % self._dndT_e]
print("\n".join(msg))
def n_o_expr(self):
""" Sympy expression, dispersion formula for o-ray """
return sympy.sqrt(1.0 + self._B1_o * wl**2/ (wl**2 - self._C1_o) + self._B2_o * wl**2/ (wl**2 - self._C2_o) + self._B3_o * wl**2/ (wl**2 - self._C3_o)) + self._dndT_o * (T - 20)
def n_e_expr(self):
""" Sympy expression, dispersion formula for theta=90 deg e-ray """
return sympy.sqrt(1.0 + self._B1_e * wl**2/ (wl**2 - self._C1_e) + self._B2_e * wl**2/ (wl**2 - self._C2_e) + self._B3_e * wl**2/ (wl**2 - self._C3_e)) + self._dndT_e * (T - 20)
def n_expr(self, pol):
""""
Sympy expression,
dispersion formula of a general ray with an angle theta to optic axis. If theta = 0, this expression reduces to 'no_expre'.
n(theta) = n_e / sqrt( sin(theta)**2 + (n_e/n_o)**2 * cos(theta)**2 )
"""
if pol == 'o':
return self.n_o_expr()
elif pol == 'e':
return self.n_e_expr() / sympy.sqrt( sympy.sin(theta)**2 + (self.n_e_expr()/self.n_o_expr())**2 * sympy.cos(theta)**2 )
else:
raise ValueError("pol = '%s' must be 'o' or 'e'" % pol)
def n(self, wl_um, theta_rad, T_degC, pol='o'):
"""
Refractive index as a function of wavelength, theta and phi angles for each eigen polarization of light.
input
------
wl_um : float or array_like, wavelength in um
theta_rad : float or array_like, 0 to pi radians
T_degC : float or array_like, temperature of crystal in degree C.
pol : {'o', 'e'}, optional, polarization of light
return
-------
Refractive index, float or array_like
"""
return super().n(wl_um, theta_rad, 0, T_degC, pol=pol)
def dn_wl(self, wl_um, theta_rad, T_degC, pol='o'):
return super().dn_wl(wl_um, theta_rad, 0, T_degC, pol=pol)
def d2n_wl(self, wl_um, theta_rad, T_degC, pol='o'):
return super().d2n_wl(wl_um, theta_rad, 0, T_degC, pol=pol)
def d3n_wl(self, wl_um, theta_rad, T_degC, pol='o'):
return super().d3n_wl(wl_um, theta_rad, 0, T_degC, pol=pol)
def GD(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group Delay [fs/mm]"""
return super().GD(wl_um, theta_rad, 0, T_degC, pol=pol)
def GV(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group Velocity [um/fs]"""
return super().GV(wl_um, theta_rad, 0, T_degC, pol=pol)
def ng(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group index, c/Group velocity"""
return super().ng(wl_um, theta_rad, 0, T_degC, pol=pol)
def GVD(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group Delay Dispersion [fs^2/mm]"""
return super().GVD(wl_um, theta_rad, 0, T_degC, pol=pol)
def TOD(self, wl_um, theta_rad, T_degC, pol='o'):
"""Third Order Dispersion [fs^3/mm]"""
return super().TOD(wl_um, theta_rad, 0, T_degC, pol=pol)
def dndT(self, wl_um, theta_rad, T_degC, pol='o'):
return super().dndT(wl_um, theta_rad, 0, T_degC, pol=pol) | import sympy
from ndispers._baseclass import Medium, wl, phi, theta, T, pi
from ndispers.helper import vars2
class BetaBBO(Medium):
"""
beta-BBO (beta-Ba B_2 O_4) crystal
- Point group : 3m
- Crystal system : Trigonal
- Dielectic principal axis, z // c-axis (x, y-axes are arbitrary)
- Negative uniaxial, with optic axis parallel to z-axis
- Tranparency range : 0.19 to 2.6 um
Dispersion formula for refractive index
---------------------------------------
n(wl_um) = sqrt(1 + B1_i*wl**2/(wl**2 - C1_i) + B2_i*wl**2/(wl**2 - C2_i) + B3_i*wl**2/(wl**2 - C3_i)) for i = o, e
Validity range
--------------
0.188 - 5.2 um
Ref
---
Tamošauskas, Gintaras, et al. "Transmittance and phase matching of BBO crystal in the 3-5 μm range and its application for the characterization of mid-infrared laser pulses." Optical Materials Express 8.6 (2018): 1410-1418.
dn/dT from Nikogosyan, <NAME>. "Beta barium borate (BBO)." Applied Physics A 52.6 (1991): 359-368.
Example
-------
>>> bbo = ndispers.media.crystals.BetaBBO_Tamosauskas2018()
>>> bbo.n(0.6, 0, 40, pol='o') # args: (wl_um, theta_rad, T_degC, pol)
>>> bbo.n(0.6, 0.5*pi, 40, pol='e') # along z-axis, it is pure e-ray.
>>> bbo.n(0.6, 0*pi, 40, pol='e') # for theta = 0 rad, it corresponds to o-ray.
>>> bbo.GVD(0.6, 0.3*pi, 40, pol='e')
>>> bbo.pmAngles_sfg(1.064, 1.064, 40, deg=True)
{'ooe': {'theta': [22.895], 'phi': None},
'eeo': {'theta': [], 'phi': None},
'oee': {'theta': [32.575], 'phi': None},
'eoe': {'theta': [32.575], 'phi': None},
'eoo': {'theta': [], 'phi': None},
'oeo': {'theta': [], 'phi': None}}
"""
__slots__ = ["_BetaBBO__plane", "_BetaBBO__theta_rad", "_BetaBBO__phi_rad",
"_B1_o", "_C1_o", "_B2_o", "_C2_o", "_B3_o", "_C3_o",
"_B1_e", "_C1_e", "_B2_e", "_C2_e", "_B3_e", "_C3_e",
"_dndT_o", "_dndT_e"]
def __init__(self):
super().__init__()
self._BetaBBO__plane = 'arb'
self._BetaBBO__theta_rad = 'var'
self._BetaBBO__phi_rad = 'arb'
""" Constants of dispersion formula """
# For ordinary ray
self._B1_o = 0.90291
self._C1_o = 0.003926
self._B2_o = 0.83155
self._C2_o = 0.018786
self._B3_o = 0.76536
self._C3_o = 60.01
# For extraordinary ray
self._B1_e = 1.151075
self._C1_e = 0.007142
self._B2_e = 0.21803
self._C2_e = 0.02259
self._B3_e = 0.656
self._C3_e = 263
# dn/dT
self._dndT_o = -16.6e-6 #/degC
self._dndT_e = -9.3e-6 #/degC
@property
def plane(self):
return self._BetaBBO__plane
@property
def theta_rad(self):
return self._BetaBBO__theta_rad
@property
def phi_rad(self):
return self._BetaBBO__phi_rad
@property
def constants(self):
print(vars2(self))
@property
def symbols(self):
return [wl, theta, phi, T]
@property
def constants(self):
msg = ["B1_o = %g" % self._B1_o]
msg += ["C1_o = %g" % self._C1_o]
msg += ["B2_o = %g" % self._B2_o]
msg += ["C2_o = %g" % self._C2_o]
msg += ["B3_o = %g" % self._B3_o]
msg += ["C3_o = %g" % self._C3_o]
msg += ["B1_e = %g" % self._B1_e]
msg += ["C1_e = %g" % self._C1_e]
msg += ["B2_e = %g" % self._B2_e]
msg += ["C2_e = %g" % self._C2_e]
msg += ["B3_e = %g" % self._B3_e]
msg += ["C3_e = %g" % self._C3_e]
msg += ["dn_o/dT = %g" % self._dndT_o]
msg += ["dn_e/dT = %g" % self._dndT_e]
print("\n".join(msg))
def n_o_expr(self):
""" Sympy expression, dispersion formula for o-ray """
return sympy.sqrt(1.0 + self._B1_o * wl**2/ (wl**2 - self._C1_o) + self._B2_o * wl**2/ (wl**2 - self._C2_o) + self._B3_o * wl**2/ (wl**2 - self._C3_o)) + self._dndT_o * (T - 20)
def n_e_expr(self):
""" Sympy expression, dispersion formula for theta=90 deg e-ray """
return sympy.sqrt(1.0 + self._B1_e * wl**2/ (wl**2 - self._C1_e) + self._B2_e * wl**2/ (wl**2 - self._C2_e) + self._B3_e * wl**2/ (wl**2 - self._C3_e)) + self._dndT_e * (T - 20)
def n_expr(self, pol):
""""
Sympy expression,
dispersion formula of a general ray with an angle theta to optic axis. If theta = 0, this expression reduces to 'no_expre'.
n(theta) = n_e / sqrt( sin(theta)**2 + (n_e/n_o)**2 * cos(theta)**2 )
"""
if pol == 'o':
return self.n_o_expr()
elif pol == 'e':
return self.n_e_expr() / sympy.sqrt( sympy.sin(theta)**2 + (self.n_e_expr()/self.n_o_expr())**2 * sympy.cos(theta)**2 )
else:
raise ValueError("pol = '%s' must be 'o' or 'e'" % pol)
def n(self, wl_um, theta_rad, T_degC, pol='o'):
"""
Refractive index as a function of wavelength, theta and phi angles for each eigen polarization of light.
input
------
wl_um : float or array_like, wavelength in um
theta_rad : float or array_like, 0 to pi radians
T_degC : float or array_like, temperature of crystal in degree C.
pol : {'o', 'e'}, optional, polarization of light
return
-------
Refractive index, float or array_like
"""
return super().n(wl_um, theta_rad, 0, T_degC, pol=pol)
def dn_wl(self, wl_um, theta_rad, T_degC, pol='o'):
return super().dn_wl(wl_um, theta_rad, 0, T_degC, pol=pol)
def d2n_wl(self, wl_um, theta_rad, T_degC, pol='o'):
return super().d2n_wl(wl_um, theta_rad, 0, T_degC, pol=pol)
def d3n_wl(self, wl_um, theta_rad, T_degC, pol='o'):
return super().d3n_wl(wl_um, theta_rad, 0, T_degC, pol=pol)
def GD(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group Delay [fs/mm]"""
return super().GD(wl_um, theta_rad, 0, T_degC, pol=pol)
def GV(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group Velocity [um/fs]"""
return super().GV(wl_um, theta_rad, 0, T_degC, pol=pol)
def ng(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group index, c/Group velocity"""
return super().ng(wl_um, theta_rad, 0, T_degC, pol=pol)
def GVD(self, wl_um, theta_rad, T_degC, pol='o'):
"""Group Delay Dispersion [fs^2/mm]"""
return super().GVD(wl_um, theta_rad, 0, T_degC, pol=pol)
def TOD(self, wl_um, theta_rad, T_degC, pol='o'):
"""Third Order Dispersion [fs^3/mm]"""
return super().TOD(wl_um, theta_rad, 0, T_degC, pol=pol)
def dndT(self, wl_um, theta_rad, T_degC, pol='o'):
return super().dndT(wl_um, theta_rad, 0, T_degC, pol=pol) | en | 0.56011 | beta-BBO (beta-Ba B_2 O_4) crystal - Point group : 3m - Crystal system : Trigonal - Dielectic principal axis, z // c-axis (x, y-axes are arbitrary) - Negative uniaxial, with optic axis parallel to z-axis - Tranparency range : 0.19 to 2.6 um Dispersion formula for refractive index --------------------------------------- n(wl_um) = sqrt(1 + B1_i*wl**2/(wl**2 - C1_i) + B2_i*wl**2/(wl**2 - C2_i) + B3_i*wl**2/(wl**2 - C3_i)) for i = o, e Validity range -------------- 0.188 - 5.2 um Ref --- Tamošauskas, Gintaras, et al. "Transmittance and phase matching of BBO crystal in the 3-5 μm range and its application for the characterization of mid-infrared laser pulses." Optical Materials Express 8.6 (2018): 1410-1418. dn/dT from Nikogosyan, <NAME>. "Beta barium borate (BBO)." Applied Physics A 52.6 (1991): 359-368. Example ------- >>> bbo = ndispers.media.crystals.BetaBBO_Tamosauskas2018() >>> bbo.n(0.6, 0, 40, pol='o') # args: (wl_um, theta_rad, T_degC, pol) >>> bbo.n(0.6, 0.5*pi, 40, pol='e') # along z-axis, it is pure e-ray. >>> bbo.n(0.6, 0*pi, 40, pol='e') # for theta = 0 rad, it corresponds to o-ray. >>> bbo.GVD(0.6, 0.3*pi, 40, pol='e') >>> bbo.pmAngles_sfg(1.064, 1.064, 40, deg=True) {'ooe': {'theta': [22.895], 'phi': None}, 'eeo': {'theta': [], 'phi': None}, 'oee': {'theta': [32.575], 'phi': None}, 'eoe': {'theta': [32.575], 'phi': None}, 'eoo': {'theta': [], 'phi': None}, 'oeo': {'theta': [], 'phi': None}} Constants of dispersion formula # For ordinary ray # For extraordinary ray # dn/dT #/degC #/degC Sympy expression, dispersion formula for o-ray Sympy expression, dispersion formula for theta=90 deg e-ray " Sympy expression, dispersion formula of a general ray with an angle theta to optic axis. If theta = 0, this expression reduces to 'no_expre'. n(theta) = n_e / sqrt( sin(theta)**2 + (n_e/n_o)**2 * cos(theta)**2 ) Refractive index as a function of wavelength, theta and phi angles for each eigen polarization of light. input ------ wl_um : float or array_like, wavelength in um theta_rad : float or array_like, 0 to pi radians T_degC : float or array_like, temperature of crystal in degree C. pol : {'o', 'e'}, optional, polarization of light return ------- Refractive index, float or array_like Group Delay [fs/mm] Group Velocity [um/fs] Group index, c/Group velocity Group Delay Dispersion [fs^2/mm] Third Order Dispersion [fs^3/mm] | 2.508003 | 3 |
eslib.py | sweverett/CluStR | 6 | 6633034 | <reponame>sweverett/CluStR<gh_stars>1-10
import linmix # Kelly algorithm package ported to Python
import numpy as np
import numpy.random as npr
from scipy import stats
import scipy.optimize as sop
from inputParameters import beta1, beta2
npr.seed(800)
def scatter_cal(x,y,slope,intercept,dof):
sig2 = sum((np.array(y) - (slope*np.array(x)+intercept))**2) / dof
return np.sqrt(sig2)
def invScalingRelation(tInt,tSlope,tSig):
xs = 1.0 / (1.0 + beta2*(tSig**2)/(tSlope**2))
invInt = xs * ( - tInt / tSlope + beta1*(tSig**2)/(tSlope**2) )
invSlope = xs / tSlope
invSig = np.sqrt(xs * (tSig**2) / (tSlope**2) )
return invInt, invSlope, invSig
def ninvScalingRelation(tInt,tSlope,tSig):
invInt = ( - tInt / tSlope )
invSlope = 1.0 / tSlope
invSig = np.sqrt( (tSig**2) / (tSlope**2) )
return invInt, invSlope, invSig
def obsScalingRelation(tInt1,tSlope1,tSig1,tInt2,tSlope2,tSig2,r):
# First order approximation
invInt1 = ( - tInt1 / tSlope1 + beta1*(tSig1**2)/(tSlope1**2) )
invSlope1 = 1.0 / tSlope1
invSig1 = np.sqrt( (tSig1**2) / (tSlope1**2) )
invSig2 = np.sqrt( (tSig2**2) / (tSlope2**2) )
x1 = 1.0 / (1.0 + beta2*invSig1**2)
inter = tInt2 + x1*tSlope2*( invInt1 \
- (r * invSig1 * invSig2) \
* ( beta1 + beta2 * tInt1 / tSlope1) )
slope = x1 * tSlope2 * ( invSlope1 \
+ beta2 * r * invSig1 * invSig2 / tSlope1 )
sig = tSlope2 * np.sqrt(x1) *\
np.sqrt( invSig2**2 + invSig1**2 - 2*r*invSig1*invSig2\
+ beta2*invSig1**2*invSig2**2*(1.-r**2) )
return inter, slope, sig
def nobsScalingRelation(tInt1,tSlope1,tSig1,tInt2,tSlope2,tSig2,r):
# First order approximation
invInt1 = ( - tInt1 / tSlope1 )
invSlope1 = 1.0 / tSlope1
invSig1 = np.sqrt( (tSig1**2) / (tSlope1**2) )
invSig2 = np.sqrt( (tSig2**2) / (tSlope2**2) )
inter = tInt2 + tSlope2*( invInt1 )
slope = tSlope2 * ( invSlope1 )
sig = tSlope2 * np.sqrt( invSig2**2 + invSig1**2 - 2*r*invSig1*invSig2 )
return inter, slope, sig
def findY(Y,invSig):
xs = 1.0 / (1.0 + beta2*Y**2)
f = invSig - np.sqrt(xs * Y**2 )
return f
def solveForZ_old(Z,Y,sigZY,slopeZY,ySlope,r):
xsy = 1.0 / (1.0 + beta2*Y**2)
slopeZ = slopeZY * ySlope / xsy / (1.0 + r*beta2*Y*Z)
f = sigZY**2 - slopeZ**2 * xsy * \
( Y**2 + Z**2 - 2.*r*Y*Z + beta2*(Y**2)*(Z**2)*(1.-r**2) )
return f
def solveForZ(Y,sigZY,slopeZY,ySlope,r):
p0 = slopeZY**2*ySlope**2*(1.0 + beta2*Y**2*(1.-r**2))
p1 = -slopeZY**2*ySlope**2*2.*r*Y - sigZY**2*beta2*r*Y
p2 = slopeZY**2*ySlope**2*Y**2 - sigZY**2
Z1,Z2 = np.roots([p0,p1,p2])
if np.iscomplex(Z1): return 0.,0.
return Z1,Z2
# calculate the true intercept, slope, and scatter of inverse of scaling
# relation assuming beta1 and beta2 is known (E14 notation)
def inferScalingRelationThroughInverse(infInt,infSlope,infSig):
Y = sop.fsolve(findY,infInt/infSlope,args=infSig)[0] #sig / slope
xs = 1.0 / (1.0 + beta2*Y**2)
Slope = xs / infSlope
Scatter = Y * Slope
Intercept = - Slope * (infInt / xs - beta1 * Y**2)
return Intercept, Slope, Scatter #OK
def inferScalingRelationThroughHidenVaribale(\
infInt, infSlope, infSig, yInt, ySlope, ySig, r, gInt, gSlope, gSig,\
Zg=0.0):
Y = ySig / ySlope #sig / slope
xsy = 1.0 / (1.0 + beta2*Y**2)
#Z = gSig / gSlope #initial guess
#Z = sop.fsolve(solveForZ,Z,args=(Y,infSig,infSlope,ySlope,r))[0]
Z1,Z2 = solveForZ(Y,infSig,infSlope,ySlope,r)
if (Z1 > Z2 ): Z = Z1
else: Z = Z2
#if (Zg <= 0.0): Z = Z1
#else: Z = Z2
#if (Z1 <= 0.0):
# if (Z2 <= 0.0): Z = 0.
# else: Z = Z2
#else:
# if (Z2 <= 0.0): Z = Z1
# else:
# if (Z1 > Z2): Z = Z1
# else: Z = Z2
Slope = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z)
Scatter = Z * Slope
invyInt = ( - yInt/ySlope + beta1*(ySig**2)/(ySlope**2) )
Intercept = infInt - Slope*xsy*(invyInt - r*Y*Z*(beta1 + beta2*yInt/ySlope))
return Intercept, Slope, Scatter, Z #OK
#Y = ySig / ySlope #sig / slope
#xsy = 1.0 / (1.0 + beta2*Y**2)
#Z = sop.fsolve(solveForZ,-10.0,args=(Y,infSig,infSlope,ySlope,r))[0]
#Slope1 = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z)
#Scatter1 = Z * Slope
#Z = sop.fsolve(solveForZ,5.,args=(Y,infSig,infSlope,ySlope,r))[0]
#Slope = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z)
#Scatter = Z * Slope
#invyInt = ( - yInt/ySlope + beta1*(ySig**2)/(ySlope**2) )
#Intercept = infInt - Slope*xsy*(invyInt - r*Y*Z*(beta1 + beta2*yInt/ySlope))
#return Intercept, Slope, Scatter #OK
#def makeLinearRegression(xObs,yObs,xerr,yerr):
# print len(xObs), len(yObs), len(xerr), len(yerr)
# delta = np.ones(len(xerr)); xycov = np.zeros(len(xerr))
# model = linmix.LinMix(xObs,yObs,xerr,yerr,xycov,delta,2,2)
"""
Args:
x(array_like): The observed independent variable.
y(array_like): The observed dependent variable.
xsig(array_like): 1-sigma measurement errors in x.
ysig(array_like): 1-sigma measurement errors in y.
xycov(array_like): Covariance between the measurement errors in x and y.
delta(array_like): Array indicating whether a data point is
censored (i.e., not detected), or not.
If delta[i] == 1, then the ith source is detected.
If delta[i] == 0, then the ith source is not
detected and y[i] will be interpreted as an upper
limit. Note that if there are censored data points,
then the maximum-likelihood estimate (alpha, beta,
sigsqr) is not valid. By default,
all data points are assumed to be detected.
K(int): The number of Gaussians to use in the mixture model
for the distribution of xi.
nchains(int): The number of Monte Carlo Markov Chains to instantiate.
"""
def makeLinearRegression(xObs,yObs,xerr,yerr):
print len(xObs), len(yObs), len(xerr), len(yerr)
delta = np.ones(len(xerr)); xycov = np.zeros(len(xerr))
model = linmix.LinMix(xObs,yObs,xerr,yerr,xycov,delta,2,2)
model.run_mcmc(5000, 10000, silent=False)
# return intercept, slope, scatter
return model.chain['alpha'], model.chain['beta'],\
np.sqrt(model.chain['sigsqr'])
def makeOLR(x,y):
slope, intercept, r_value, p_value, _ = stats.linregress(x,y)
sig = scatter_cal(x,y,slope,intercept,len(x)-2)
return intercept, slope, sig
| import linmix # Kelly algorithm package ported to Python
import numpy as np
import numpy.random as npr
from scipy import stats
import scipy.optimize as sop
from inputParameters import beta1, beta2
npr.seed(800)
def scatter_cal(x,y,slope,intercept,dof):
sig2 = sum((np.array(y) - (slope*np.array(x)+intercept))**2) / dof
return np.sqrt(sig2)
def invScalingRelation(tInt,tSlope,tSig):
xs = 1.0 / (1.0 + beta2*(tSig**2)/(tSlope**2))
invInt = xs * ( - tInt / tSlope + beta1*(tSig**2)/(tSlope**2) )
invSlope = xs / tSlope
invSig = np.sqrt(xs * (tSig**2) / (tSlope**2) )
return invInt, invSlope, invSig
def ninvScalingRelation(tInt,tSlope,tSig):
invInt = ( - tInt / tSlope )
invSlope = 1.0 / tSlope
invSig = np.sqrt( (tSig**2) / (tSlope**2) )
return invInt, invSlope, invSig
def obsScalingRelation(tInt1,tSlope1,tSig1,tInt2,tSlope2,tSig2,r):
# First order approximation
invInt1 = ( - tInt1 / tSlope1 + beta1*(tSig1**2)/(tSlope1**2) )
invSlope1 = 1.0 / tSlope1
invSig1 = np.sqrt( (tSig1**2) / (tSlope1**2) )
invSig2 = np.sqrt( (tSig2**2) / (tSlope2**2) )
x1 = 1.0 / (1.0 + beta2*invSig1**2)
inter = tInt2 + x1*tSlope2*( invInt1 \
- (r * invSig1 * invSig2) \
* ( beta1 + beta2 * tInt1 / tSlope1) )
slope = x1 * tSlope2 * ( invSlope1 \
+ beta2 * r * invSig1 * invSig2 / tSlope1 )
sig = tSlope2 * np.sqrt(x1) *\
np.sqrt( invSig2**2 + invSig1**2 - 2*r*invSig1*invSig2\
+ beta2*invSig1**2*invSig2**2*(1.-r**2) )
return inter, slope, sig
def nobsScalingRelation(tInt1,tSlope1,tSig1,tInt2,tSlope2,tSig2,r):
# First order approximation
invInt1 = ( - tInt1 / tSlope1 )
invSlope1 = 1.0 / tSlope1
invSig1 = np.sqrt( (tSig1**2) / (tSlope1**2) )
invSig2 = np.sqrt( (tSig2**2) / (tSlope2**2) )
inter = tInt2 + tSlope2*( invInt1 )
slope = tSlope2 * ( invSlope1 )
sig = tSlope2 * np.sqrt( invSig2**2 + invSig1**2 - 2*r*invSig1*invSig2 )
return inter, slope, sig
def findY(Y,invSig):
xs = 1.0 / (1.0 + beta2*Y**2)
f = invSig - np.sqrt(xs * Y**2 )
return f
def solveForZ_old(Z,Y,sigZY,slopeZY,ySlope,r):
xsy = 1.0 / (1.0 + beta2*Y**2)
slopeZ = slopeZY * ySlope / xsy / (1.0 + r*beta2*Y*Z)
f = sigZY**2 - slopeZ**2 * xsy * \
( Y**2 + Z**2 - 2.*r*Y*Z + beta2*(Y**2)*(Z**2)*(1.-r**2) )
return f
def solveForZ(Y,sigZY,slopeZY,ySlope,r):
p0 = slopeZY**2*ySlope**2*(1.0 + beta2*Y**2*(1.-r**2))
p1 = -slopeZY**2*ySlope**2*2.*r*Y - sigZY**2*beta2*r*Y
p2 = slopeZY**2*ySlope**2*Y**2 - sigZY**2
Z1,Z2 = np.roots([p0,p1,p2])
if np.iscomplex(Z1): return 0.,0.
return Z1,Z2
# calculate the true intercept, slope, and scatter of inverse of scaling
# relation assuming beta1 and beta2 is known (E14 notation)
def inferScalingRelationThroughInverse(infInt,infSlope,infSig):
Y = sop.fsolve(findY,infInt/infSlope,args=infSig)[0] #sig / slope
xs = 1.0 / (1.0 + beta2*Y**2)
Slope = xs / infSlope
Scatter = Y * Slope
Intercept = - Slope * (infInt / xs - beta1 * Y**2)
return Intercept, Slope, Scatter #OK
def inferScalingRelationThroughHidenVaribale(\
infInt, infSlope, infSig, yInt, ySlope, ySig, r, gInt, gSlope, gSig,\
Zg=0.0):
Y = ySig / ySlope #sig / slope
xsy = 1.0 / (1.0 + beta2*Y**2)
#Z = gSig / gSlope #initial guess
#Z = sop.fsolve(solveForZ,Z,args=(Y,infSig,infSlope,ySlope,r))[0]
Z1,Z2 = solveForZ(Y,infSig,infSlope,ySlope,r)
if (Z1 > Z2 ): Z = Z1
else: Z = Z2
#if (Zg <= 0.0): Z = Z1
#else: Z = Z2
#if (Z1 <= 0.0):
# if (Z2 <= 0.0): Z = 0.
# else: Z = Z2
#else:
# if (Z2 <= 0.0): Z = Z1
# else:
# if (Z1 > Z2): Z = Z1
# else: Z = Z2
Slope = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z)
Scatter = Z * Slope
invyInt = ( - yInt/ySlope + beta1*(ySig**2)/(ySlope**2) )
Intercept = infInt - Slope*xsy*(invyInt - r*Y*Z*(beta1 + beta2*yInt/ySlope))
return Intercept, Slope, Scatter, Z #OK
#Y = ySig / ySlope #sig / slope
#xsy = 1.0 / (1.0 + beta2*Y**2)
#Z = sop.fsolve(solveForZ,-10.0,args=(Y,infSig,infSlope,ySlope,r))[0]
#Slope1 = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z)
#Scatter1 = Z * Slope
#Z = sop.fsolve(solveForZ,5.,args=(Y,infSig,infSlope,ySlope,r))[0]
#Slope = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z)
#Scatter = Z * Slope
#invyInt = ( - yInt/ySlope + beta1*(ySig**2)/(ySlope**2) )
#Intercept = infInt - Slope*xsy*(invyInt - r*Y*Z*(beta1 + beta2*yInt/ySlope))
#return Intercept, Slope, Scatter #OK
#def makeLinearRegression(xObs,yObs,xerr,yerr):
# print len(xObs), len(yObs), len(xerr), len(yerr)
# delta = np.ones(len(xerr)); xycov = np.zeros(len(xerr))
# model = linmix.LinMix(xObs,yObs,xerr,yerr,xycov,delta,2,2)
"""
Args:
x(array_like): The observed independent variable.
y(array_like): The observed dependent variable.
xsig(array_like): 1-sigma measurement errors in x.
ysig(array_like): 1-sigma measurement errors in y.
xycov(array_like): Covariance between the measurement errors in x and y.
delta(array_like): Array indicating whether a data point is
censored (i.e., not detected), or not.
If delta[i] == 1, then the ith source is detected.
If delta[i] == 0, then the ith source is not
detected and y[i] will be interpreted as an upper
limit. Note that if there are censored data points,
then the maximum-likelihood estimate (alpha, beta,
sigsqr) is not valid. By default,
all data points are assumed to be detected.
K(int): The number of Gaussians to use in the mixture model
for the distribution of xi.
nchains(int): The number of Monte Carlo Markov Chains to instantiate.
"""
def makeLinearRegression(xObs,yObs,xerr,yerr):
print len(xObs), len(yObs), len(xerr), len(yerr)
delta = np.ones(len(xerr)); xycov = np.zeros(len(xerr))
model = linmix.LinMix(xObs,yObs,xerr,yerr,xycov,delta,2,2)
model.run_mcmc(5000, 10000, silent=False)
# return intercept, slope, scatter
return model.chain['alpha'], model.chain['beta'],\
np.sqrt(model.chain['sigsqr'])
def makeOLR(x,y):
slope, intercept, r_value, p_value, _ = stats.linregress(x,y)
sig = scatter_cal(x,y,slope,intercept,len(x)-2)
return intercept, slope, sig | en | 0.632181 | # Kelly algorithm package ported to Python # First order approximation # First order approximation # calculate the true intercept, slope, and scatter of inverse of scaling # relation assuming beta1 and beta2 is known (E14 notation) #sig / slope #OK #sig / slope #Z = gSig / gSlope #initial guess #Z = sop.fsolve(solveForZ,Z,args=(Y,infSig,infSlope,ySlope,r))[0] #if (Zg <= 0.0): Z = Z1 #else: Z = Z2 #if (Z1 <= 0.0): # if (Z2 <= 0.0): Z = 0. # else: Z = Z2 #else: # if (Z2 <= 0.0): Z = Z1 # else: # if (Z1 > Z2): Z = Z1 # else: Z = Z2 #OK #Y = ySig / ySlope #sig / slope #xsy = 1.0 / (1.0 + beta2*Y**2) #Z = sop.fsolve(solveForZ,-10.0,args=(Y,infSig,infSlope,ySlope,r))[0] #Slope1 = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z) #Scatter1 = Z * Slope #Z = sop.fsolve(solveForZ,5.,args=(Y,infSig,infSlope,ySlope,r))[0] #Slope = infSlope * ySlope / xsy / (1.0 + r*beta2*Y*Z) #Scatter = Z * Slope #invyInt = ( - yInt/ySlope + beta1*(ySig**2)/(ySlope**2) ) #Intercept = infInt - Slope*xsy*(invyInt - r*Y*Z*(beta1 + beta2*yInt/ySlope)) #return Intercept, Slope, Scatter #OK #def makeLinearRegression(xObs,yObs,xerr,yerr): # print len(xObs), len(yObs), len(xerr), len(yerr) # delta = np.ones(len(xerr)); xycov = np.zeros(len(xerr)) # model = linmix.LinMix(xObs,yObs,xerr,yerr,xycov,delta,2,2) Args: x(array_like): The observed independent variable. y(array_like): The observed dependent variable. xsig(array_like): 1-sigma measurement errors in x. ysig(array_like): 1-sigma measurement errors in y. xycov(array_like): Covariance between the measurement errors in x and y. delta(array_like): Array indicating whether a data point is censored (i.e., not detected), or not. If delta[i] == 1, then the ith source is detected. If delta[i] == 0, then the ith source is not detected and y[i] will be interpreted as an upper limit. Note that if there are censored data points, then the maximum-likelihood estimate (alpha, beta, sigsqr) is not valid. By default, all data points are assumed to be detected. K(int): The number of Gaussians to use in the mixture model for the distribution of xi. nchains(int): The number of Monte Carlo Markov Chains to instantiate. # return intercept, slope, scatter | 2.540895 | 3 |
graphene/types/field.py | sebdiem/graphene | 1 | 6633035 | <gh_stars>1-10
import inspect
from collections import Mapping, OrderedDict
from functools import partial
from .argument import Argument, to_arguments
from .mountedtype import MountedType
from .structures import NonNull
from .unmountedtype import UnmountedType
from .utils import get_type
base_type = type
def source_resolver(source, root, info, **args):
resolved = getattr(root, source, None)
if inspect.isfunction(resolved) or inspect.ismethod(resolved):
return resolved()
return resolved
class Field(MountedType):
def __init__(self, type, args=None, resolver=None, source=None,
deprecation_reason=None, name=None, description=None,
required=False, _creation_counter=None, default_value=None,
**extra_args):
super(Field, self).__init__(_creation_counter=_creation_counter)
assert not args or isinstance(args, Mapping), (
'Arguments in a field have to be a mapping, received "{}".'
).format(args)
assert not (source and resolver), (
'A Field cannot have a source and a resolver in at the same time.'
)
assert not callable(default_value), (
'The default value can not be a function but received "{}".'
).format(base_type(default_value))
if required:
type = NonNull(type)
# Check if name is actually an argument of the field
if isinstance(name, (Argument, UnmountedType)):
extra_args['name'] = name
name = None
# Check if source is actually an argument of the field
if isinstance(source, (Argument, UnmountedType)):
extra_args['source'] = source
source = None
self.name = name
self._type = type
self.args = to_arguments(args or OrderedDict(), extra_args)
if source:
resolver = partial(source_resolver, source)
self.resolver = resolver
self.deprecation_reason = deprecation_reason
self.description = description
self.default_value = default_value
@property
def type(self):
return get_type(self._type)
def get_resolver(self, parent_resolver):
return self.resolver or parent_resolver
| import inspect
from collections import Mapping, OrderedDict
from functools import partial
from .argument import Argument, to_arguments
from .mountedtype import MountedType
from .structures import NonNull
from .unmountedtype import UnmountedType
from .utils import get_type
base_type = type
def source_resolver(source, root, info, **args):
resolved = getattr(root, source, None)
if inspect.isfunction(resolved) or inspect.ismethod(resolved):
return resolved()
return resolved
class Field(MountedType):
def __init__(self, type, args=None, resolver=None, source=None,
deprecation_reason=None, name=None, description=None,
required=False, _creation_counter=None, default_value=None,
**extra_args):
super(Field, self).__init__(_creation_counter=_creation_counter)
assert not args or isinstance(args, Mapping), (
'Arguments in a field have to be a mapping, received "{}".'
).format(args)
assert not (source and resolver), (
'A Field cannot have a source and a resolver in at the same time.'
)
assert not callable(default_value), (
'The default value can not be a function but received "{}".'
).format(base_type(default_value))
if required:
type = NonNull(type)
# Check if name is actually an argument of the field
if isinstance(name, (Argument, UnmountedType)):
extra_args['name'] = name
name = None
# Check if source is actually an argument of the field
if isinstance(source, (Argument, UnmountedType)):
extra_args['source'] = source
source = None
self.name = name
self._type = type
self.args = to_arguments(args or OrderedDict(), extra_args)
if source:
resolver = partial(source_resolver, source)
self.resolver = resolver
self.deprecation_reason = deprecation_reason
self.description = description
self.default_value = default_value
@property
def type(self):
return get_type(self._type)
def get_resolver(self, parent_resolver):
return self.resolver or parent_resolver | en | 0.759891 | # Check if name is actually an argument of the field # Check if source is actually an argument of the field | 2.447335 | 2 |
wfdb/readwrite/records.py | Chirayu-sopho/Sleep_Disorder_Classification | 0 | 6633036 | <reponame>Chirayu-sopho/Sleep_Disorder_Classification<gh_stars>0
# For wrheader(), all fields must be already filled in and cohesive with one another. The signals field will not be used.
# For wrsamp(), the field to use will be d_signals (which is allowed to be empty for 0 channel records).
# set_p_features and set_d_features use characteristics of the p_signals or d_signals field to fill in other header fields.
# These are separate from another method 'setdefaults' which the user may call to set default header fields
# The checkfieldcohesion() function will be called in wrheader which checks all the header fields.
# The checksignalcohesion() function will be called by wrsamp (within wrdats) to check the d_signals against the header fields.
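# Typical single-segment write flow (a sketch): construct a Record, fill in or derive the header fields
# (e.g. via setdefaults() and set_d_features() from d_signals), then call wrsamp(), which runs the header
# and signal cohesion checks before writing the header and dat files.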
import numpy as np
import re
import os
import posixpath
from collections import OrderedDict
from calendar import monthrange
import requests
import multiprocessing
from . import _headers
from . import _signals
from . import downloads
# The base WFDB class to extend to create Record and MultiRecord. Contains shared helper functions and fields.
class BaseRecord(object):
# Constructor
def __init__(self, recordname=None, nsig=None,
fs=None, counterfreq=None, basecounter = None,
siglen = None, basetime = None, basedate = None,
comments = None, signame=None):
self.recordname = recordname
self.nsig = nsig
self.fs = fs
self.counterfreq = counterfreq
self.basecounter = basecounter
self.siglen = siglen
self.basetime = basetime
self.basedate = basedate
self.comments = comments
self.signame = signame
# Check whether a single field is valid in its basic form. Does not check compatibility with other fields.
    # channels is only used for signal specification fields, and specifies which channels to check. Values for channels that are not required
# can be None.
# Be aware that this function is not just called from wrheader.
def checkfield(self, field, channels=None):
# Check that the field is present
if getattr(self, field) is None:
raise Exception("Missing field required: "+field)
# Check the type of the field (and of its elements if it should be a list)
self.checkfieldtype(field, channels)
# Expand to make sure all channels must have present field
if channels == 'all':
channels = [1]*len(getattr(self, field))
# Individual specific field checks:
if field == 'd_signals':
# Check shape
if self.d_signals.ndim != 2:
raise TypeError("d_signals must be a 2d numpy array")
# Check dtype
if self.d_signals.dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
raise TypeError('d_signals must be a 2d numpy array with dtype == int64, int32, int16, or int8.')
elif field =='p_signals':
# Check shape
if self.p_signals.ndim != 2:
raise TypeError("p_signals must be a 2d numpy array")
elif field == 'e_d_signals':
# Check shape
for ch in range(len(channels)):
if self.e_d_signals[ch].ndim != 1:
raise TypeError("e_d_signals must be a list of 1d numpy arrays")
# Check dtype
if self.e_d_signals[ch].dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
                    raise TypeError('e_d_signals must be a list of 1d numpy arrays with dtype == int64, int32, int16, or int8.')
elif field =='e_p_signals':
# Check shape
for ch in range(0, len(channels)):
                if self.e_p_signals[ch].ndim != 1:
raise TypeError("e_p_signals must be a list of 1d numpy arrays")
#elif field == 'segments': # Nothing to check here.
# Record specification fields
elif field == 'recordname':
# Allow letters, digits, hyphens, and underscores.
            acceptedstring = re.match(r'[-\w]+', self.recordname)
if not acceptedstring or acceptedstring.string != self.recordname:
raise ValueError('recordname must only comprise of letters, digits, hyphens, and underscores.')
elif field == 'nseg':
if self.nseg <=0:
raise ValueError('nseg must be a positive integer')
elif field == 'nsig':
if self.nsig <=0:
raise ValueError('nsig must be a positive integer')
elif field == 'fs':
if self.fs<=0:
raise ValueError('fs must be a positive number')
elif field == 'counterfreq':
if self.counterfreq <=0:
raise ValueError('counterfreq must be a positive number')
elif field == 'basecounter':
if self.basecounter <=0:
raise ValueError('basecounter must be a positive number')
elif field == 'siglen':
if self.siglen <0:
raise ValueError('siglen must be a non-negative integer')
elif field == 'basetime':
_ = parsetimestring(self.basetime)
elif field == 'basedate':
_ = parsedatestring(self.basedate)
# Signal specification fields. Lists of elements to check.
elif field in _headers.sigfieldspecs:
for ch in range(0, len(channels)):
f = getattr(self, field)[ch]
# The channel element is allowed to be None
if not channels[ch]:
if f is None:
continue
if field == 'filename':
# Check for filename characters
                    acceptedstring = re.match(r'[-\w]+\.?[\w]+',f)
if not acceptedstring or acceptedstring.string != f:
raise ValueError('File names should only contain alphanumerics, hyphens, and an extension. eg. record_100.dat')
# Check that dat files are grouped together
if orderedsetlist(self.filename)[0] != orderednoconseclist(self.filename):
raise ValueError('filename error: all entries for signals that share a given file must be consecutive')
elif field == 'fmt':
if f not in _signals.datformats:
raise ValueError('File formats must be valid WFDB dat formats: '+' , '.join(_signals.datformats))
elif field == 'sampsperframe':
if f < 1:
raise ValueError('sampsperframe values must be positive integers')
elif field == 'skew':
if f < 0:
raise ValueError('skew values must be non-negative integers')
elif field == 'byteoffset':
if f < 0:
raise ValueError('byteoffset values must be non-negative integers')
elif field == 'adcgain':
if f <= 0:
raise ValueError('adcgain values must be positive numbers')
elif field == 'baseline':
# Currently original WFDB library only has 4 bytes for baseline.
                    if f < -2147483648 or f > 2147483647:
raise ValueError('baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)')
elif field == 'units':
                    if re.search(r'\s', f):
raise ValueError('units strings may not contain whitespaces.')
elif field == 'adcres':
if f < 0:
raise ValueError('adcres values must be non-negative integers')
# elif field == 'adczero': nothing to check here
# elif field == 'initvalue': nothing to check here
# elif field == 'checksum': nothing to check here
elif field == 'blocksize':
if f < 0:
raise ValueError('blocksize values must be non-negative integers')
elif field == 'signame':
                    if re.search(r'\s', f):
raise ValueError('signame strings may not contain whitespaces.')
if len(set(self.signame)) != len(self.signame):
raise ValueError('signame strings must be unique.')
# Segment specification fields
elif field == 'segname':
# Segment names must be alphanumerics or just a single '~'
for f in self.segname:
if f == '~':
continue
                acceptedstring = re.match(r'[-\w]+',f)
if not acceptedstring or acceptedstring.string != f:
raise ValueError("Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'")
elif field == 'seglen':
# For records with more than 1 segment, the first segment may be
# the layout specification segment with a length of 0
if len(self.seglen)>1:
if self.seglen[0] < 0:
raise ValueError('seglen values must be positive integers. Only seglen[0] may be 0 to indicate a layout segment')
sl = self.seglen[1:]
else:
sl = self.seglen
for f in sl:
if f < 1:
raise ValueError('seglen values must be positive integers. Only seglen[0] may be 0 to indicate a layout segment')
# Comment field
elif field == 'comments':
for f in self.comments:
if f=='': # Allow empty string comment lines
continue
if f[0] == '#':
print("Note: comment strings do not need to begin with '#'. This library adds them automatically.")
if re.search('[\t\n\r\f\v]', f):
raise ValueError('comments may not contain tabs or newlines (they may contain spaces and underscores).')
# Check the data type of the specified field.
# ch is used for signal spec fields
# Some fields are lists. This must be checked, along with their elements.
def checkfieldtype(self, field, ch=None):
item = getattr(self, field)
# Record specification field. Nonlist.
if field in _headers.recfieldspecs:
checkitemtype(item, field, _headers.recfieldspecs[field].allowedtypes)
# Signal specification field. List.
elif field in _headers.sigfieldspecs:
checkitemtype(item, field, _headers.sigfieldspecs[field].allowedtypes, ch)
# Segment specification field. List. All elements cannot be None
elif field in _headers.segfieldspecs:
checkitemtype(item, field, _headers.segfieldspecs[field].allowedtypes, 'all')
# Comments field. List. Elements cannot be None
elif field == 'comments':
checkitemtype(item, field, (str), 'all')
# Signals field.
elif field in ['p_signals','d_signals']:
checkitemtype(item, field, (np.ndarray))
elif field in ['e_p_signals', 'e_d_signals']:
checkitemtype(item, field, (np.ndarray), 'all')
# Segments field. List. Elements may be None.
elif field == 'segments':
checkitemtype(item, field, (Record), 'none')
# Ensure that input read parameters are valid for the record
def checkreadinputs(self, sampfrom, sampto, channels, physical, m2s, smoothframes, returnres):
# Data Type Check
if not hasattr(sampfrom, '__index__'):
raise TypeError('sampfrom must be an integer')
if not hasattr(sampto, '__index__'):
raise TypeError('sampto must be an integer')
if not isinstance(channels, list):
raise TypeError('channels must be a list of integers')
# Duration Ranges
if sampfrom<0:
raise ValueError('sampfrom must be a non-negative integer')
if sampfrom>self.siglen:
raise ValueError('sampfrom must be shorter than the signal length')
if sampto<0:
raise ValueError('sampto must be a non-negative integer')
if sampto>self.siglen:
raise ValueError('sampto must be shorter than the signal length')
if sampto<=sampfrom:
raise ValueError('sampto must be greater than sampfrom')
# Channel Ranges
for c in channels:
if c<0:
raise ValueError('Input channels must all be non-negative integers')
if c>self.nsig-1:
raise ValueError('Input channels must all be lower than the total number of channels')
if returnres not in [64, 32, 16, 8]:
raise ValueError("returnres must be one of the following: 64, 32, 16, 8")
if physical is True and returnres == 8:
raise ValueError("returnres must be one of the following when physical is True: 64, 32, 16")
# Cannot expand multiple samples/frame for multi-segment records
if isinstance(self, MultiRecord):
# If m2s == True, Physical must be true. There is no
# meaningful representation of digital signals transferred
# from individual segments.
if m2s is True and physical is not True:
raise Exception('If m2s is True, physical must also be True.')
if smoothframes is False:
raise ValueError('This package version cannot expand all samples when reading multi-segment records. Must enable frame smoothing.')
# Check the item type. Vary the print message regarding whether the item can be None.
# Helper to checkfieldtype
# channels is a list of booleans indicating whether the field's channel must be present (1) or may be None (0)
# and is not just for signal specification fields
def checkitemtype(item, field, allowedtypes, channels=None):
# Checking the list
if channels is not None:
# First make sure the item is a list
if not isinstance(item, list):
raise TypeError("Field: '"+field+"' must be a list")
# Expand to make sure all channels must have present field
if channels == 'all':
channels = [1]*len(item)
# Expand to allow any channel to be None
if channels == 'none':
channels = [0]*len(item)
for ch in range(0, len(channels)):
mustexist=channels[ch]
# The field must exist for the channel
if mustexist:
if not isinstance(item[ch], allowedtypes):
raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be one of the following types:", allowedtypes)
# The field may be None for the channel
else:
if not isinstance(item[ch], allowedtypes) and item[ch] is not None:
raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be a 'None', or one of the following types:", allowedtypes)
# Single scalar to check
else:
if not isinstance(item, allowedtypes):
raise TypeError("Field: '"+field+"' must be one of the following types:", allowedtypes)
class Record(BaseRecord, _headers.HeadersMixin, _signals.SignalsMixin):
"""
The class representing WFDB headers, and single segment WFDB records.
Record objects can be created using the constructor, by reading a WFDB header
    with 'rdheader', or a WFDB record (header and associated dat files) with 'rdsamp'
or 'srdsamp'.
The attributes of the Record object give information about the record as specified
by https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the d_signals and p_signals attributes store the digital and physical
signals of WFDB records with at least one channel.
    Constructor function:
def __init__(self, p_signals=None, d_signals=None,
recordname=None, nsig=None,
fs=None, counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
filename=None, fmt=None, sampsperframe=None,
skew=None, byteoffset=None, adcgain=None,
baseline=None, units=None, adcres=None,
adczero=None, initvalue=None, checksum=None,
blocksize=None, signame=None, comments=None)
Example Usage:
import wfdb
record = wfdb.Record(recordname='r1', fs=250, nsig=2, siglen=1000, filename=['r1.dat','r1.dat'])
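    # (a sketch) before record.wrsamp() could be called, the signal content and per-channel fields
    # would also need to be set, e.g. d_signals, fmt, adcgain, baseline, units and signame.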
"""
# Constructor
def __init__(self, p_signals=None, d_signals=None,
e_p_signals=None, e_d_signals=None,
recordname=None, nsig=None,
fs=None, counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
filename=None, fmt=None, sampsperframe=None,
skew=None, byteoffset=None, adcgain=None,
baseline=None, units=None, adcres=None,
adczero=None, initvalue=None, checksum=None,
blocksize=None, signame=None, comments=None):
# Note the lack of 'nseg' field. Single segment records cannot have this field. Even nseg = 1 makes
# the header a multi-segment header.
super(Record, self).__init__(recordname, nsig,
fs, counterfreq, basecounter, siglen,
basetime, basedate, comments, signame)
self.p_signals = p_signals
self.d_signals = d_signals
self.e_p_signals = e_p_signals
self.e_d_signals = e_d_signals
self.filename=filename
self.fmt=fmt
self.sampsperframe=sampsperframe
self.skew=skew
self.byteoffset=byteoffset
self.adcgain=adcgain
self.baseline=baseline
self.units=units
self.adcres=adcres
self.adczero=adczero
self.initvalue=initvalue
self.checksum=checksum
self.blocksize=blocksize
# Equal comparison operator for objects of this type
def __eq__(self, other):
att1 = self.__dict__
att2 = other.__dict__
if set(att1.keys()) != set(att2.keys()):
return False
for k in att1.keys():
v1 = att1[k]
v2 = att2[k]
if type(v1) != type(v2):
return False
if type(v1) == np.ndarray:
if not np.array_equal(v1, v2):
return False
else:
if v1 != v2:
return False
return True
# Write a wfdb header file and associated dat files if any.
# Uses d_signals (expanded=False) or e_d_signals to write the samples
def wrsamp(self, expanded=False):
# Perform field validity and cohesion checks, and write the header file.
self.wrheader()
if self.nsig>0:
# Perform signal validity and cohesion checks, and write the associated dat files.
self.wrdats(expanded)
# Arrange/edit object fields to reflect user channel and/or signal range input
# Account for case when signals are expanded
def arrangefields(self, channels, expanded=False):
# Rearrange signal specification fields
for field in _headers.sigfieldspecs:
item = getattr(self, field)
setattr(self, field, [item[c] for c in channels])
# Expanded signals - multiple samples per frame.
if expanded:
# Checksum and initvalue to be updated if present
# unless the whole signal length was input
if self.siglen != int(len(self.e_d_signals[0])/self.sampsperframe[0]):
self.checksum = self.calc_checksum(expanded)
self.initvalue = [s[0] for s in self.e_d_signals]
self.nsig = len(channels)
self.siglen = int(len(self.e_d_signals[0])/self.sampsperframe[0])
# MxN numpy array d_signals
else:
# Checksum and initvalue to be updated if present
# unless the whole signal length was input
if self.siglen != self.d_signals.shape[0]:
if self.checksum is not None:
self.checksum = self.calc_checksum()
if self.initvalue is not None:
ival = list(self.d_signals[0, :])
self.initvalue = [int(i) for i in ival]
# Update record specification parameters
# Important that these get updated after^^
self.nsig = len(channels)
self.siglen = self.d_signals.shape[0]
# Class for multi segment WFDB records.
class MultiRecord(BaseRecord, _headers.MultiHeadersMixin):
"""
The class representing multi-segment WFDB records.
MultiRecord objects can be created using the constructor, or by reading a multi-segment
WFDB record using 'rdsamp' with the 'm2s' (multi to single) input parameter set to False.
The attributes of the MultiRecord object give information about the entire record as specified
by https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the 'segments' parameter is a list of Record objects representing each
individual segment, or 'None' representing empty segments, of the entire multi-segment record.
    Notably, this class has no attribute representing the signals as a whole. The 'multi_to_single'
instance method can be called on MultiRecord objects to return a single segment representation
of the record as a Record object. The resulting Record object will have its 'p_signals' field set.
    Constructor function:
def __init__(self, segments=None, layout=None,
recordname=None, nsig=None, fs=None,
counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
segname=None, seglen=None, comments=None,
signame=None, sigsegments=None)
Example Usage:
import wfdb
recordM = wfdb.MultiRecord(recordname='rm', fs=50, nsig=8, siglen=9999,
                               segname=['rm_1', '~', 'rm_2'], seglen=[800, 200, 900])
recordL = wfdb.rdsamp('s00001-2896-10-10-00-31', m2s = False)
recordL = recordL.multi_to_single()
"""
# Constructor
def __init__(self, segments=None, layout=None,
recordname=None, nsig=None, fs=None,
counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
segname=None, seglen=None, comments=None,
signame=None, sigsegments=None):
super(MultiRecord, self).__init__(recordname, nsig,
fs, counterfreq, basecounter, siglen,
basetime, basedate, comments, signame)
self.layout = layout
self.segments = segments
self.segname = segname
self.seglen = seglen
self.sigsegments=sigsegments
# Write a multi-segment header, along with headers and dat files for all segments
def wrsamp(self):
# Perform field validity and cohesion checks, and write the header file.
self.wrheader()
# Perform record validity and cohesion checks, and write the associated segments.
for seg in self.segments:
seg.wrsamp()
# Check the cohesion of the segments field with other fields used to write the record
def checksegmentcohesion(self):
# Check that nseg is equal to the length of the segments field
if self.nseg != len(self.segments):
raise ValueError("Length of segments must match the 'nseg' field")
        for i in range(0, self.nseg):
s = self.segments[i]
# If segment 0 is a layout specification record, check that its file names are all == '~''
if i==0 and self.seglen[0] == 0:
for filename in s.filename:
if filename != '~':
raise ValueError("Layout specification records must have all filenames named '~'")
# Check that sampling frequencies all match the one in the master header
if s.fs != self.fs:
raise ValueError("The 'fs' in each segment must match the overall record's 'fs'")
# Check the signal length of the segment against the corresponding seglen field
if s.siglen != self.seglen[i]:
raise ValueError('The signal length of segment '+str(i)+' does not match the corresponding segment length')
# No need to check the sum of siglens from each segment object against siglen
# Already effectively done it when checking sum(seglen) against siglen
# Determine the segments and the samples
# within each segment that have to be read in a
# multi-segment record. Called during rdsamp.
def requiredsegments(self, sampfrom, sampto, channels):
# The starting segment with actual samples
if self.layout == 'Fixed':
startseg = 0
else:
startseg = 1
# Cumulative sum of segment lengths (ignoring layout segment)
cumsumlengths = list(np.cumsum(self.seglen[startseg:]))
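        # Worked example with hypothetical lengths: for a fixed layout record with seglen = [800, 200, 900],
        # cumsumlengths = [800, 1000, 1900]. A request for samples 750 to 1200 then spans segments 0 to 2,
        # and the per-segment sample ranges computed below are [750, 800], [0, 200] and [0, 200].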
# Get first segment
readsegs = [[sampfrom < cs for cs in cumsumlengths].index(True)]
# Get final segment
if sampto == cumsumlengths[len(cumsumlengths) - 1]:
readsegs.append(len(cumsumlengths) - 1)
else:
readsegs.append([sampto <= cs for cs in cumsumlengths].index(True))
# Add 1 for variable layout records
readsegs = list(np.add(readsegs,startseg))
# Obtain the sampfrom and sampto to read for each segment
if readsegs[1] == readsegs[0]:
# Only one segment to read
readsegs = [readsegs[0]]
# The segment's first sample number relative to the entire record
segstartsamp = sum(self.seglen[0:readsegs[0]])
readsamps = [[sampfrom-segstartsamp, sampto-segstartsamp]]
else:
# More than one segment to read
readsegs = list(range(readsegs[0], readsegs[1]+1))
readsamps = [[0, self.seglen[s]] for s in readsegs]
# Starting sample for first segment.
readsamps[0][0] = sampfrom - ([0] + cumsumlengths)[readsegs[0]-startseg]
# End sample for last segment
readsamps[-1][1] = sampto - ([0] + cumsumlengths)[readsegs[-1]-startseg]
return (readsegs, readsamps)
# Get the channel numbers to be read from each segment
def requiredsignals(self, readsegs, channels, dirname, pbdir):
# Fixed layout. All channels are the same.
if self.layout == 'Fixed':
# Should we bother here with skipping empty segments?
# They won't be read anyway.
readsigs = [channels]*len(readsegs)
# Variable layout: figure out channels by matching record names
else:
readsigs = []
# The overall layout signal names
l_signames = self.segments[0].signame
# The wanted signals
w_signames = [l_signames[c] for c in channels]
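            # wanted_siginds maps the wanted signal names onto this segment's channel numbers,
            # returning None when the segment contains none of the wanted signals.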
# For each segment ...
for i in range(0, len(readsegs)):
# Skip empty segments
if self.segname[readsegs[i]] == '~':
readsigs.append(None)
else:
# Get the signal names of the current segment
s_signames = rdheader(os.path.join(dirname, self.segname[readsegs[i]]), pbdir = pbdir).signame
readsigs.append(wanted_siginds(w_signames, s_signames))
return readsigs
# Arrange/edit object fields to reflect user channel and/or signal range input
def arrangefields(self, readsegs, segranges, channels):
# Update seglen values for relevant segments
for i in range(0, len(readsegs)):
self.seglen[readsegs[i]] = segranges[i][1] - segranges[i][0]
# Update record specification parameters
self.nsig = len(channels)
self.siglen = sum([sr[1]-sr[0] for sr in segranges])
# Get rid of the segments and segment line parameters
# outside the desired segment range
if self.layout == 'Fixed':
self.segments = self.segments[readsegs[0]:readsegs[-1]+1]
self.segname = self.segname[readsegs[0]:readsegs[-1]+1]
self.seglen = self.seglen[readsegs[0]:readsegs[-1]+1]
else:
# Keep the layout specifier segment
self.segments = [self.segments[0]] + self.segments[readsegs[0]:readsegs[-1]+1]
self.segname = [self.segname[0]] + self.segname[readsegs[0]:readsegs[-1]+1]
self.seglen = [self.seglen[0]] + self.seglen[readsegs[0]:readsegs[-1]+1]
# Update number of segments
self.nseg = len(self.segments)
# Convert a MultiRecord object to a Record object
def multi_to_single(self, returnres):
# The fields to transfer to the new object
fields = self.__dict__.copy()
# Remove multirecord fields
del(fields['segments'])
del(fields['segname'])
del(fields['seglen'])
del(fields['nseg'])
# The output physical signals
if returnres == 64:
floatdtype = 'float64'
elif returnres == 32:
floatdtype = 'float32'
else:
floatdtype = 'float16'
p_signals = np.zeros([self.siglen, self.nsig], dtype=floatdtype)
# Get the physical samples from each segment
# Start and end samples in the overall array
# to place the segment samples into
startsamps = [0] + list(np.cumsum(self.seglen)[0:-1])
endsamps = list(np.cumsum(self.seglen))
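        # e.g. with hypothetical seglen = [800, 200, 900], startsamps = [0, 800, 1000] and
        # endsamps = [800, 1000, 1900], so each segment fills its own slice of p_signals.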
if self.layout == 'Fixed':
# Get the signal names and units from the first segment
fields['signame'] = self.segments[0].signame
fields['units'] = self.segments[0].units
for i in range(self.nseg):
p_signals[startsamps[i]:endsamps[i],:] = self.segments[i].p_signals
# For variable layout, have to get channels by name
else:
# Get the signal names from the layout segment
fields['signame'] = self.segments[0].signame
fields['units'] = self.segments[0].units
for i in range(1, self.nseg):
seg = self.segments[i]
# Empty segment
if seg is None:
p_signals[startsamps[i]:endsamps[i],:] = np.nan
# Non-empty segment
else:
# Figure out if there are any channels wanted and
# the output channels they are to be stored in
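                    # e.g. if the wanted names are ['II', 'V', 'ABP'] (hypothetical) and this segment
                    # contains ['V', 'II'], then inchannels = [1, 0] and outchannels = [0, 1].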
inchannels = []
outchannels = []
for s in fields['signame']:
if s in seg.signame:
inchannels.append(seg.signame.index(s))
outchannels.append(fields['signame'].index(s))
# Segment contains no wanted channels. Fill with nans.
if inchannels == []:
p_signals[startsamps[i]:endsamps[i],:] = np.nan
# Segment contains wanted channel(s). Transfer samples.
else:
# This statement is necessary in case this function is not called
# directly from rdsamp with m2s=True.
if not hasattr(seg, 'p_signals'):
seg.p_signals = seg.dac(returnres=returnres)
for ch in range(0, fields['nsig']):
if ch not in outchannels:
p_signals[startsamps[i]:endsamps[i],ch] = np.nan
else:
p_signals[startsamps[i]:endsamps[i],ch] = seg.p_signals[:, inchannels[outchannels.index(ch)]]
# Create the single segment Record object and set attributes
record = Record()
for field in fields:
setattr(record, field, fields[field])
record.p_signals = p_signals
return record
#------------------- Reading Records -------------------#
# Read a WFDB single or multi segment record. Return a Record or MultiRecord object
def rdsamp(recordname, sampfrom=0, sampto=None, channels = None, physical = True, pbdir = None,
m2s = True, smoothframes = True, ignoreskew=False, returnres=64):
"""Read a WFDB record and return the signal and record descriptors as attributes in a
Record or MultiRecord object.
Usage:
record = rdsamp(recordname, sampfrom=0, sampto=None, channels=None, physical=True, pbdir = None,
m2s=True, smoothframes = True, ignoreskew=False)
Input arguments:
- recordname (required): The name of the WFDB record to be read (without any file extensions).
If the argument contains any path delimiter characters, the argument will be interpreted as
PATH/baserecord and the data files will be searched for in the local path.
- sampfrom (default=0): The starting sample number to read for each channel.
- sampto (default=None): The sample number at which to stop reading for each channel.
- channels (default=all): Indices specifying the channel to be returned.
- physical (default=True): Flag that specifies whether to return signals in physical units in
the p_signals field (True), or digital units in the d_signals field (False).
- pbdir (default=None): Option used to stream data from Physiobank. The Physiobank database
directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
- m2s (default=True): Flag used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert it into and return a wfdb
Record object (True).
- smoothframes (default=True): Flag used when reading records with signals having multiple
samples per frame. Specifies whether to smooth the samples in signals with more than
one sample per frame and return an mxn uniform numpy array as the d_signals or p_signals
field (True), or to return a list of 1d numpy arrays containing every expanded sample as
the e_d_signals or e_p_signals field (False).
- ignoreskew (default=False): Flag used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the output variable (False), or
to ignore the skew field and load in all values contained in the dat files unaligned (True).
- returnres (default=64): The numpy array dtype of the returned signals. Options are: 64, 32,
16, and 8, where the value represents the numpy int or float dtype. Note that the value
cannot be 8 when physical is True since there is no float8 format.
Output argument:
- record: The wfdb Record or MultiRecord object representing the contents of the record read.
Note: If a signal range or channel selection is specified when calling this function, the
resulting attributes of the returned object will be set to reflect the section
of the record that is actually read, rather than necessarily what is in the header file.
For example, if channels = [0, 1, 2] is specified when reading a 12 channel record, the
'nsig' attribute will be 3, not 12.
Note: The 'srdsamp' function exists as a simple alternative to 'rdsamp' for the most common
purpose of extracting the physical signals and a few important descriptor fields.
'srdsamp' returns two arguments: the physical signals array, and a dictionary of a
few select fields, a subset of the original wfdb Record attributes.
Example Usage:
import wfdb
ecgrecord = wfdb.rdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
"""
dirname, baserecordname = os.path.split(recordname)
# Read the header fields into the appropriate record object
record = rdheader(recordname, pbdir = pbdir, rdsegments = False)
# Set defaults for sampto and channels input variables
if sampto is None:
sampto = record.siglen
if channels is None:
channels = list(range(record.nsig))
# Ensure that input fields are valid for the record
record.checkreadinputs(sampfrom, sampto, channels, physical, m2s, smoothframes, returnres)
# A single segment record
if isinstance(record, Record):
# Only 1 sample/frame, or frames are smoothed. Return uniform numpy array
if smoothframes or max([record.sampsperframe[c] for c in channels])==1:
# Read signals from the associated dat files that contain wanted channels
record.d_signals = _signals.rdsegment(record.filename, dirname, pbdir, record.nsig, record.fmt, record.siglen,
record.byteoffset, record.sampsperframe, record.skew,
sampfrom, sampto, channels, smoothframes, ignoreskew)
# Arrange/edit the object fields to reflect user channel and/or signal range input
record.arrangefields(channels, expanded=False)
if physical is True:
# Perform inplace dac to get physical signal
record.dac(expanded=False, returnres=returnres, inplace=True)
# Return each sample of the signals with multiple samples per frame
else:
record.e_d_signals = _signals.rdsegment(record.filename, dirname, pbdir, record.nsig, record.fmt, record.siglen,
record.byteoffset, record.sampsperframe, record.skew,
sampfrom, sampto, channels, smoothframes, ignoreskew)
# Arrange/edit the object fields to reflect user channel and/or signal range input
record.arrangefields(channels, expanded=True)
if physical is True:
# Perform dac to get physical signal
record.dac(expanded=True, returnres=returnres, inplace=True)
# A multi segment record
# We can make another rdsamp function (called rdsamp_segment) to call
# for individual segments to deal with the skews.
else:
# Strategy:
# 1. Read the required segments and store them in
# Record objects.
# 2. Update the parameters of the objects to reflect
# the state of the sections read.
# 3. Update the parameters of the overall MultiRecord
# object to reflect the state of the individual segments.
# 4. If specified, convert the MultiRecord object
# into a single Record object.
# Segments field is a list of Record objects
# Empty segments store None.
record.segments = [None]*record.nseg
# Variable layout
if record.seglen[0] == 0:
record.layout = 'Variable'
# Read the layout specification header
record.segments[0] = rdheader(os.path.join(dirname, record.segname[0]), pbdir=pbdir)
# Fixed layout
else:
record.layout = 'Fixed'
# The segment numbers and samples within each segment to read.
readsegs, segranges = record.requiredsegments(sampfrom, sampto, channels)
# The signals within each segment to read
segsigs = record.requiredsignals(readsegs, channels, dirname, pbdir)
# Read the desired samples in the relevant segments
for i in range(len(readsegs)):
segnum = readsegs[i]
# Empty segment or segment with no relevant channels
if record.segname[segnum] == '~' or segsigs[i] is None:
record.segments[segnum] = None
else:
record.segments[segnum] = rdsamp(os.path.join(dirname, record.segname[segnum]),
sampfrom = segranges[i][0], sampto = segranges[i][1],
channels = segsigs[i], physical = True, pbdir=pbdir)
# Arrange the fields of the overall object to reflect user input
record.arrangefields(readsegs, segranges, channels)
# Convert object into a single segment Record object
if m2s:
record = record.multi_to_single(returnres=returnres)
# Perform dtype conversion if necessary
if isinstance(record, Record) and record.nsig>0:
record.convert_dtype(physical, returnres, smoothframes)
return record
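# A minimal usage sketch (the record path is illustrative). Passing m2s=False keeps
# the MultiRecord object, which can still be flattened later with multi_to_single():
#
# import wfdb
# multirec = wfdb.rdsamp('sampledata/multisegment/s00001-2896-10-10-00-31',
#                        sampto=1000, m2s=False)
# flatrec = multirec.multi_to_single(returnres=64)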
# Read a WFDB header. Return a Record object or MultiRecord object
def rdheader(recordname, pbdir = None, rdsegments = False):
"""Read a WFDB header file and return the record descriptors as attributes in a Record object
Usage:
record = rdheader(recordname, pbdir = None, rdsegments = False)
Input arguments:
- recordname (required): The name of the WFDB record to be read (without any file extensions).
If the argument contains any path delimiter characters, the argument will be interpreted as
PATH/baserecord and the header file will be searched for in the local path.
- pbdir (default=None): Option used to stream data from Physiobank. The Physiobank database
directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
- rdsegments (default=False): Boolean flag used when reading multi-segment headers. If True,
segment headers will also be read (into the record object's 'segments' field).
Output argument:
- record: The wfdb Record or MultiRecord object representing the contents of the header read.
Example Usage:
import wfdb
ecgrecord = wfdb.rdheader('sampledata/test01_00s')
"""
# Read the header file. Separate comment and non-comment lines
headerlines, commentlines = _headers.getheaderlines(recordname, pbdir)
# Get fields from record line
d_rec = _headers.read_rec_line(headerlines[0])
# Processing according to whether the header is single or multi segment
# Single segment header - Process signal specification lines
if d_rec['nseg'] is None:
# Create a single-segment WFDB record object
record = Record()
# There is at least one channel
if len(headerlines)>1:
# Read the fields from the signal lines
d_sig = _headers.read_sig_lines(headerlines[1:])
# Set the object's signal line fields
for field in _headers.sigfieldspecs:
setattr(record, field, d_sig[field])
# Set the object's record line fields
for field in _headers.recfieldspecs:
if field == 'nseg':
continue
setattr(record, field, d_rec[field])
# Multi segment header - Process segment specification lines
else:
# Create a multi-segment WFDB record object
record = MultiRecord()
# Read the fields from the segment lines
d_seg = _headers.read_seg_lines(headerlines[1:])
# Set the object's segment line fields
for field in _headers.segfieldspecs:
setattr(record, field, d_seg[field])
# Set the objects' record line fields
for field in _headers.recfieldspecs:
setattr(record, field, d_rec[field])
# Determine whether the record is fixed or variable
if record.seglen[0] == 0:
record.layout = 'Variable'
else:
record.layout = 'Fixed'
# If specified, read the segment headers
if rdsegments:
record.segments = []
# Get the base record name (could be empty)
dirname = os.path.split(recordname)[0]
for s in record.segname:
if s == '~':
record.segments.append(None)
else:
record.segments.append(rdheader(os.path.join(dirname,s), pbdir))
# Fill in the signame attribute
record.signame = record.getsignames()
# Fill in the sigsegments attribute
record.sigsegments = record.getsigsegments()
# Set the comments field
record.comments = []
for line in commentlines:
record.comments.append(line.strip(' \t#'))
return record
# Given some wanted signal names, and the signal names contained
# in a record, return the indices of the record channels that intersect.
# Note that the wanted signal names are already in the order specified by the user's channel input.
def wanted_siginds(wanted_signames, record_signames):
contained_signals = [s for s in wanted_signames if s in record_signames]
if contained_signals == []:
return None
else:
return [record_signames.index(s) for s in contained_signals]
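# A quick illustration of the matching behaviour (signal names are hypothetical):
# wanted_siginds(['II', 'V5'], ['I', 'II', 'V5', 'AVR']) returns [1, 2], while
# wanted_siginds(['RESP'], ['I', 'II']) returns None because no wanted name is present.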
# A simple version of rdsamp for ease of use
# Return the physical signals and a few essential fields
def srdsamp(recordname, sampfrom=0, sampto=None, channels = None, pbdir = None):
"""Read a WFDB record and return the physical signal and a few important descriptor fields
Usage:
signals, fields = srdsamp(recordname, sampfrom=0, sampto=None, channels=None, pbdir=None)
Input arguments:
- recordname (required): The name of the WFDB record to be read (without any file extensions).
If the argument contains any path delimiter characters, the argument will be interpreted as
PATH/baserecord and the data files will be searched for in the local path.
- sampfrom (default=0): The starting sample number to read for each channel.
- sampto (default=None): The sample number at which to stop reading for each channel.
- channels (default=all): Indices specifying the channel to be returned.
Output arguments:
- signals: A 2d numpy array storing the physical signals from the record.
- fields: A dictionary specifying several key attributes of the read record:
- fs: The sampling frequency of the record
- units: The units for each channel
- signame: The signal name for each channel
- comments: Any comments written in the header
Note: If a signal range or channel selection is specified when calling this function, the
resulting attributes of the returned object will be set to reflect the section
of the record that is actually read, rather than necessarily what is in the header file.
For example, if channels = [0, 1, 2] is specified when reading a 12 channel record, the
'nsig' attribute will be 3, not 12.
Note: The 'rdsamp' function is the base function upon which this one is built. It returns
all attributes present, along with the signals, as attributes in a wfdb.Record object.
The function, along with the returned data type, have more options than 'srdsamp' for
users who wish to more directly manipulate WFDB files.
Example Usage:
import wfdb
sig, fields = wfdb.srdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
"""
record = rdsamp(recordname, sampfrom, sampto, channels, True, pbdir, True)
signals = record.p_signals
fields = {}
for field in ['fs','units','signame', 'comments']:
fields[field] = getattr(record, field)
return signals, fields
#------------------- /Reading Records -------------------#
# Function for writing single segment records
def wrsamp(recordname, fs, units, signames, p_signals=None, d_signals=None,
fmt=None, gain=None, baseline=None, comments=None, basetime=None,
basedate=None):
"""Write a single segment WFDB record, creating a WFDB header file and any associated dat files.
Usage:
wrsamp(recordname, fs, units, signames, p_signals = None, d_signals=None,
fmt = None, gain = None, baseline = None, comments = None)
Input arguments:
- recordname (required): The string name of the WFDB record to be written (without any file extensions).
- fs (required): The numerical sampling frequency of the record.
- units (required): A list of strings giving the units of each signal channel.
- signames (required): A list of strings giving the signal name of each signal channel.
- p_signals (default=None): An MxN 2d numpy array, where M is the signal length. Gives the physical signal
values intended to be written. Either p_signals or d_signals must be set, but not both. If p_signals
is set, this method will use it to perform analogue-digital conversion, writing the resultant digital
values to the dat file(s). If fmt is set, gain and baseline must be set or unset together. If fmt is
unset, gain and baseline must both be unset.
- d_signals (default=None): An MxN 2d numpy array, where M is the signal length. Gives the digital signal
values intended to be directly written to the dat file(s). The dtype must be an integer type. Either
p_signals or d_signals must be set, but not both. In addition, if d_signals is set, fmt, gain and baseline
must also all be set.
- fmt (default=None): A list of strings giving the WFDB format of each file used to store each channel.
Accepted formats are: "80","212","16","24", and "32". There are other WFDB formats but this library
will not write (though it will read) those file types.
- gain (default=None): A list of integers specifying the ADC gain.
- baseline (default=None): A list of integers specifying the digital baseline.
- comments (default=None): A list of string comments to be written to the header file.
- basetime (default=None): A string of the record's start time in 24h HH:MM:SS(.ms) format.
- basedate (default=None): A string of the record's start date in DD/MM/YYYY format.
Note: This gateway function was written to enable a simple way to write WFDB record files using
the most frequently used parameters. Therefore not all WFDB fields can be set via this function.
For more control over attributes, create a wfdb.Record object, manually set its attributes, and
call its wrsamp() instance method. If you choose this more advanced method, see also the setdefaults,
set_d_features, and set_p_features instance methods to help populate attributes.
Example Usage (with the most common scenario of input parameters):
import wfdb
# Read part of a record from Physiobank
sig, fields = wfdb.srdsamp('a103l', sampfrom = 50000, channels = [0,1], pbdir = 'challenge/2015/training')
# Write a local WFDB record (manually inserting fields)
wfdb.wrsamp('ecgrecord', fs = 250, units = ['mV', 'mV'], signames = ['I', 'II'], p_signals = sig, fmt = ['16', '16'])
"""
# Check input field combinations
if p_signals is not None and d_signals is not None:
raise Exception('Must only give one of the inputs: p_signals or d_signals')
if d_signals is not None:
if fmt is None or gain is None or baseline is None:
raise Exception("When using d_signals, must also specify 'fmt', 'gain', and 'baseline' fields.")
# Depending on whether d_signals or p_signals was used, set other required features.
if p_signals is not None:
# Create the Record object
record = Record(recordname=recordname, p_signals=p_signals, fs=fs,
fmt=fmt, units=units, signame=signames, adcgain = gain,
baseline=baseline, comments=comments, basetime=basetime,
basedate=basedate)
# Compute optimal fields to store the digital signal, carry out adc, and set the fields.
record.set_d_features(do_adc = 1)
else:
# Create the Record object
record = Record(recordname=recordname, d_signals=d_signals, fs=fs,
fmt=fmt, units=units, signame = signames, adcgain = gain,
baseline=baseline, comments=comments, basetime=basetime,
basedate=basedate)
# Use d_signals to set the fields directly
record.set_d_features()
# Set default values of any missing field dependencies
record.setdefaults()
# Write the record files - header and associated dat
record.wrsamp()
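# A minimal sketch of the d_signals path described above (all values are illustrative).
# When digital samples are written directly, fmt, gain and baseline must all be given
# and the array dtype must be an integer type:
#
# import numpy as np
# dsig = np.zeros((1000, 2), dtype='int16')
# wrsamp('digrecord', fs=360, units=['mV', 'mV'], signames=['I', 'II'],
#        d_signals=dsig, fmt=['16', '16'], gain=[200, 200], baseline=[0, 0])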
# Time string parser for WFDB header - H(H):M(M):S(S(.sss)) format.
def parsetimestring(timestring):
times = re.findall("(?P<hours>\d{1,2}):(?P<minutes>\d{1,2}):(?P<seconds>\d{1,2}[.\d+]*)", timestring)
if not times:
raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
else:
hours, minutes, seconds = times[0]
if not hours or not minutes or not seconds:
raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
hours = int(hours)
minutes = int(minutes)
seconds = float(seconds)
if int(hours) >23:
raise ValueError('hours must be < 24')
elif hours<0:
raise ValueError('hours must be positive')
if minutes>59:
raise ValueError('minutes must be < 60')
elif minutes<0:
raise ValueError('minutes must be positive')
if seconds>59:
raise ValueError('seconds must be < 60')
elif seconds<0:
raise ValueError('seconds must be positive')
return (hours, minutes, seconds)
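# For example (an illustrative value): parsetimestring('12:05:02.5') returns (12, 5, 2.5).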
# Date string parser for WFDB header - DD/MM/YYYY
def parsedatestring(datestring):
dates = re.findall(r"(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4})", datestring)
if not dates:
raise ValueError("Invalid date string. Acceptable format is: 'DD/MM/YYYY'")
else:
day, month, year = dates[0]
day = int(day)
month = int(month)
year = int(year)
if year<1:
raise ValueError('year must be positive')
if month<1 or month>12:
raise ValueError('month must be between 1 and 12')
if day not in range(1, monthrange(year, month)[1]+1):
raise ValueError('day does not exist for specified year and month')
return (day, month, year)
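# For example (an illustrative value): parsedatestring('29/02/2016') returns
# (29, 2, 2016); the monthrange check accepts the 29th because 2016 is a leap year.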
# Returns the unique elements in a list in the order that they appear.
# Also returns the indices of the original list that correspond to each output element.
def orderedsetlist(fulllist):
uniquelist = []
original_inds = {}
for i in range(0, len(fulllist)):
item = fulllist[i]
# new item
if item not in uniquelist:
uniquelist.append(item)
original_inds[item] = [i]
# previously seen item
else:
original_inds[item].append(i)
return uniquelist, original_inds
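# For example, with dat file names where the first two channels share a file (names
# are illustrative): orderedsetlist(['sig.dat', 'sig.dat', 'extra.dat']) returns
# (['sig.dat', 'extra.dat'], {'sig.dat': [0, 1], 'extra.dat': [2]}).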
# Returns elements in a list without consecutive repeated values.
def orderednoconseclist(fulllist):
noconseclist = [fulllist[0]]
if len(fulllist) == 1:
return noconseclist
for i in fulllist:
if i!= noconseclist[-1]:
noconseclist.append(i)
return noconseclist
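# For example (illustrative input): orderednoconseclist(['a.dat', 'a.dat', 'b.dat', 'a.dat'])
# returns ['a.dat', 'b.dat', 'a.dat']; only consecutive repeats are collapsed, so an
# element may still appear more than once in the output.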
# These file-downloading gateway functions rely on the Record/MultiRecord objects.
# They are placed here rather than in downloads.py in order to avoid circular imports
# Download WFDB files from a physiobank database
# This function only targets databases with WFDB records (EDF and MIT format).
# If the database doesn't have a 'RECORDS' file, it will fail.
def dldatabase(pbdb, dlbasedir, records = 'all', annotators = 'all' , keepsubdirs = True, overwrite = False):
"""Download WFDB record (and optionally annotation) files from a Physiobank database. The database
must contain a 'RECORDS' file in its base directory which lists its WFDB records.
Usage:
dldatabase(pbdb, dlbasedir, records = 'all', annotators = 'all' , keepsubdirs = True, overwrite = False)
Input arguments:
- pbdb (required): The Physiobank database directory to download.
eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
- dlbasedir (required): The full local directory path in which to download the files.
- records (default='all'): Specifier of the WFDB records to download. Is either a list of strings
which each specify a record, or 'all' to download all records listed in the database's RECORDS file.
eg. records = ['test01_00s', 'test02_45s'] for database https://physionet.org/physiobank/database/macecgdb/
- annotators (default='all'): Specifier of the WFDB annotation file types to download along with
the record files. Is either None to skip downloading any annotations, 'all' to download all
annotation types as specified by the ANNOTATORS file, or a list of strings which each specify an
annotation extension.
eg. annotators = ['anI'] for database https://physionet.org/physiobank/database/prcp/
- keepsubdirs (default=True): Whether to keep the relative subdirectories of downloaded files
as they are organized in Physiobank (True), or to download all files into the same base directory (False).
- overwrite (default=False): If set to True, all files will be redownloaded regardless. If set to False,
existing files with the same name and relative subdirectory will be checked. If the local file is
the same size as the online file, the download is skipped. If the local file is larger, it will be deleted
and the file will be redownloaded. If the local file is smaller, the file will be assumed to be
partially downloaded and the remaining bytes will be downloaded and appended.
Example Usage:
import wfdb
wfdb.dldatabase('ahadb', os.getcwd())
"""
# Full url physiobank database
dburl = posixpath.join(downloads.dbindexurl, pbdb)
# Check if the database is valid
r = requests.get(dburl)
r.raise_for_status()
# Get the list of records
recordlist = downloads.getrecordlist(dburl, records)
# Get the annotator extensions
annotators = downloads.getannotators(dburl, annotators)
# All files to download (relative to the database's home directory)
allfiles = []
for rec in recordlist:
# Check out whether each record is in MIT or EDF format
if rec.endswith('.edf'):
allfiles.append(rec)
else:
# If MIT format, have to figure out all associated files
allfiles.append(rec+'.hea')
dirname, baserecname = os.path.split(rec)
record = rdheader(baserecname, pbdir = posixpath.join(pbdb, dirname))
# Single segment record
if isinstance(record, Record):
# Add all dat files of the segment
for file in record.filename:
allfiles.append(posixpath.join(dirname, file))
# Multi segment record
else:
for seg in record.segname:
# Skip empty segments
if seg == '~':
continue
# Add the header
allfiles.append(posixpath.join(dirname, seg+'.hea'))
# Layout specifier has no dat files
if seg.endswith('_layout'):
continue
# Add all dat files of the segment
recseg = rdheader(seg, pbdir = posixpath.join(pbdb, dirname))
for file in recseg.filename:
allfiles.append(posixpath.join(dirname, file))
# check whether the record has any requested annotation files
if annotators is not None:
for a in annotators:
annfile = rec+'.'+a
url = posixpath.join(downloads.dbindexurl, pbdb, annfile)
rh = requests.head(url)
if rh.status_code != 404:
allfiles.append(annfile)
dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], pbdb, dlbasedir, keepsubdirs, overwrite) for file in allfiles]
# Make any required local directories
downloads.makelocaldirs(dlbasedir, dlinputs, keepsubdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(downloads.dlpbfile, dlinputs)
print('Finished downloading files')
return
# Download specific files from a physiobank database
def dldatabasefiles(pbdb, dlbasedir, files, keepsubdirs = True, overwrite = False):
"""Download specified files from a Physiobank database.
Usage:
dldatabasefiles(pbdb, dlbasedir, files, keepsubdirs = True, overwrite = False):
Input arguments:
- pbdb (required): The Physiobank database directory to download.
eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
- dlbasedir (required): The full local directory path in which to download the files.
- files (required): A list of strings specifying the file names to download relative to the database
base directory
- keepsubdirs (default=True): Whether to keep the relative subdirectories of downloaded files
as they are organized in Physiobank (True), or to download all files into the same base directory (False).
- overwrite (default=False): If set to True, all files will be redownloaded regardless. If set to False,
existing files with the same name and relative subdirectory will be checked. If the local file is
the same size as the online file, the download is skipped. If the local file is larger, it will be deleted
and the file will be redownloaded. If the local file is smaller, the file will be assumed to be
partially downloaded and the remaining bytes will be downloaded and appended.
Example Usage:
import wfdb
wfdb.dldatabasefiles('ahadb', os.getcwd(), ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat'])
"""
# Full url physiobank database
dburl = posixpath.join(downloads.dbindexurl, pbdb)
# Check if the database is valid
r = requests.get(dburl)
r.raise_for_status()
# Construct the urls to download
dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], pbdb, dlbasedir, keepsubdirs, overwrite) for file in files]
# Make any required local directories
downloads.makelocaldirs(dlbasedir, dlinputs, keepsubdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(downloads.dlpbfile, dlinputs)
print('Finished downloading files')
return
# For wrheader(), all fields must already be filled in and cohesive with one another. The signals field will not be used.
# For wrsamp(), the field to use will be d_signals (which is allowed to be empty for 0 channel records).
# set_p_features and set_d_features use characteristics of the p_signals or d_signals field to fill in other header fields.
# These are separate from another method 'setdefaults' which the user may call to set default header fields
# The checkfieldcohesion() function will be called in wrheader which checks all the header fields.
# The checksignalcohesion() function will be called in wrsamp, within the dat writing step, to check the d_signals against the header fields.
import numpy as np
import re
import os
import posixpath
from collections import OrderedDict
from calendar import monthrange
import requests
import multiprocessing
from . import _headers
from . import _signals
from . import downloads
# The base WFDB class to extend to create Record and MultiRecord. Contains shared helper functions and fields.
class BaseRecord(object):
# Constructor
def __init__(self, recordname=None, nsig=None,
fs=None, counterfreq=None, basecounter = None,
siglen = None, basetime = None, basedate = None,
comments = None, signame=None):
self.recordname = recordname
self.nsig = nsig
self.fs = fs
self.counterfreq = counterfreq
self.basecounter = basecounter
self.siglen = siglen
self.basetime = basetime
self.basedate = basedate
self.comments = comments
self.signame = signame
# Check whether a single field is valid in its basic form. Does not check compatibility with other fields.
# ch is only used for signal specification fields, specifying the channels to check. Other channels
# can be None.
# Be aware that this function is not just called from wrheader.
def checkfield(self, field, channels=None):
# Check that the field is present
if getattr(self, field) is None:
raise Exception("Missing field required: "+field)
# Check the type of the field (and of its elements if it should be a list)
self.checkfieldtype(field, channels)
# Expand the channel specification so that the field must be present for every channel
if channels == 'all':
channels = [1]*len(getattr(self, field))
# Individual specific field checks:
if field == 'd_signals':
# Check shape
if self.d_signals.ndim != 2:
raise TypeError("d_signals must be a 2d numpy array")
# Check dtype
if self.d_signals.dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
raise TypeError('d_signals must be a 2d numpy array with dtype == int64, int32, int16, or int8.')
elif field =='p_signals':
# Check shape
if self.p_signals.ndim != 2:
raise TypeError("p_signals must be a 2d numpy array")
elif field == 'e_d_signals':
# Check shape
for ch in range(len(channels)):
if self.e_d_signals[ch].ndim != 1:
raise TypeError("e_d_signals must be a list of 1d numpy arrays")
# Check dtype
if self.e_d_signals[ch].dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
raise TypeError('e_d_signals must be a list of 1d numpy arrays with dtype == int64, int32, int16, or int8.')
elif field =='e_p_signals':
# Check shape
for ch in range(0, len(channels)):
if self.e_p_signals[ch].ndim != 1:
raise TypeError("e_p_signals must be a list of 1d numpy arrays")
#elif field == 'segments': # Nothing to check here.
# Record specification fields
elif field == 'recordname':
# Allow letters, digits, hyphens, and underscores.
acceptedstring = re.match('[-\w]+', self.recordname)
if not acceptedstring or acceptedstring.string != self.recordname:
raise ValueError('recordname may only contain letters, digits, hyphens, and underscores.')
elif field == 'nseg':
if self.nseg <=0:
raise ValueError('nseg must be a positive integer')
elif field == 'nsig':
if self.nsig <=0:
raise ValueError('nsig must be a positive integer')
elif field == 'fs':
if self.fs<=0:
raise ValueError('fs must be a positive number')
elif field == 'counterfreq':
if self.counterfreq <=0:
raise ValueError('counterfreq must be a positive number')
elif field == 'basecounter':
if self.basecounter <=0:
raise ValueError('basecounter must be a positive number')
elif field == 'siglen':
if self.siglen <0:
raise ValueError('siglen must be a non-negative integer')
elif field == 'basetime':
_ = parsetimestring(self.basetime)
elif field == 'basedate':
_ = parsedatestring(self.basedate)
# Signal specification fields. Lists of elements to check.
elif field in _headers.sigfieldspecs:
for ch in range(0, len(channels)):
f = getattr(self, field)[ch]
# The channel element is allowed to be None
if not channels[ch]:
if f is None:
continue
if field == 'filename':
# Check for filename characters
acceptedstring = re.match('[-\w]+\.?[\w]+',f)
if not acceptedstring or acceptedstring.string != f:
raise ValueError('File names should only contain alphanumerics, hyphens, and an extension. eg. record_100.dat')
# Check that dat files are grouped together
if orderedsetlist(self.filename)[0] != orderednoconseclist(self.filename):
raise ValueError('filename error: all entries for signals that share a given file must be consecutive')
elif field == 'fmt':
if f not in _signals.datformats:
raise ValueError('File formats must be valid WFDB dat formats: '+' , '.join(_signals.datformats))
elif field == 'sampsperframe':
if f < 1:
raise ValueError('sampsperframe values must be positive integers')
elif field == 'skew':
if f < 0:
raise ValueError('skew values must be non-negative integers')
elif field == 'byteoffset':
if f < 0:
raise ValueError('byteoffset values must be non-negative integers')
elif field == 'adcgain':
if f <= 0:
raise ValueError('adcgain values must be positive numbers')
elif field == 'baseline':
# Currently original WFDB library only has 4 bytes for baseline.
if f < -2147483648 or f > 2147483647:
raise ValueError('baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)')
elif field == 'units':
if re.search('\s', f):
raise ValueError('units strings may not contain whitespaces.')
elif field == 'adcres':
if f < 0:
raise ValueError('adcres values must be non-negative integers')
# elif field == 'adczero': nothing to check here
# elif field == 'initvalue': nothing to check here
# elif field == 'checksum': nothing to check here
elif field == 'blocksize':
if f < 0:
raise ValueError('blocksize values must be non-negative integers')
elif field == 'signame':
if re.search('\s', f):
raise ValueError('signame strings may not contain whitespaces.')
if len(set(self.signame)) != len(self.signame):
raise ValueError('signame strings must be unique.')
# Segment specification fields
elif field == 'segname':
# Segment names must be alphanumerics or just a single '~'
for f in self.segname:
if f == '~':
continue
acceptedstring = re.match('[-\w]+',f)
if not acceptedstring or acceptedstring.string != f:
raise ValueError("Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'")
elif field == 'seglen':
# For records with more than 1 segment, the first segment may be
# the layout specification segment with a length of 0
if len(self.seglen)>1:
if self.seglen[0] < 0:
raise ValueError('seglen values must be positive integers. Only seglen[0] may be 0 to indicate a layout segment')
sl = self.seglen[1:]
else:
sl = self.seglen
for f in sl:
if f < 1:
raise ValueError('seglen values must be positive integers. Only seglen[0] may be 0 to indicate a layout segment')
# Comment field
elif field == 'comments':
for f in self.comments:
if f=='': # Allow empty string comment lines
continue
if f[0] == '#':
print("Note: comment strings do not need to begin with '#'. This library adds them automatically.")
if re.search('[\t\n\r\f\v]', f):
raise ValueError('comments may not contain tabs or newlines (they may contain spaces and underscores).')
# Check the data type of the specified field.
# ch is used for signal spec fields
# Some fields are lists. This must be checked, along with their elements.
def checkfieldtype(self, field, ch=None):
item = getattr(self, field)
# Record specification field. Nonlist.
if field in _headers.recfieldspecs:
checkitemtype(item, field, _headers.recfieldspecs[field].allowedtypes)
# Signal specification field. List.
elif field in _headers.sigfieldspecs:
checkitemtype(item, field, _headers.sigfieldspecs[field].allowedtypes, ch)
# Segment specification field. List. All elements cannot be None
elif field in _headers.segfieldspecs:
checkitemtype(item, field, _headers.segfieldspecs[field].allowedtypes, 'all')
# Comments field. List. Elements cannot be None
elif field == 'comments':
checkitemtype(item, field, (str), 'all')
# Signals field.
elif field in ['p_signals','d_signals']:
checkitemtype(item, field, (np.ndarray))
elif field in ['e_p_signals', 'e_d_signals']:
checkitemtype(item, field, (np.ndarray), 'all')
# Segments field. List. Elements may be None.
elif field == 'segments':
checkitemtype(item, field, (Record), 'none')
# Ensure that input read parameters are valid for the record
def checkreadinputs(self, sampfrom, sampto, channels, physical, m2s, smoothframes, returnres):
# Data Type Check
if not hasattr(sampfrom, '__index__'):
raise TypeError('sampfrom must be an integer')
if not hasattr(sampto, '__index__'):
raise TypeError('sampto must be an integer')
if not isinstance(channels, list):
raise TypeError('channels must be a list of integers')
# Duration Ranges
if sampfrom<0:
raise ValueError('sampfrom must be a non-negative integer')
if sampfrom>self.siglen:
raise ValueError('sampfrom must be shorter than the signal length')
if sampto<0:
raise ValueError('sampto must be a non-negative integer')
if sampto>self.siglen:
raise ValueError('sampto must be shorter than the signal length')
if sampto<=sampfrom:
raise ValueError('sampto must be greater than sampfrom')
# Channel Ranges
for c in channels:
if c<0:
raise ValueError('Input channels must all be non-negative integers')
if c>self.nsig-1:
raise ValueError('Input channels must all be lower than the total number of channels')
if returnres not in [64, 32, 16, 8]:
raise ValueError("returnres must be one of the following: 64, 32, 16, 8")
if physical is True and returnres == 8:
raise ValueError("returnres must be one of the following when physical is True: 64, 32, 16")
# Cannot expand multiple samples/frame for multi-segment records
if isinstance(self, MultiRecord):
# If m2s == True, Physical must be true. There is no
# meaningful representation of digital signals transferred
# from individual segments.
if m2s is True and physical is not True:
raise Exception('If m2s is True, physical must also be True.')
if smoothframes is False:
raise ValueError('This package version cannot expand all samples when reading multi-segment records. Must enable frame smoothing.')
# Check the item type. Vary the print message regarding whether the item can be None.
# Helper to checkfieldtype
# channels is a list of booleans indicating whether the field's channel must be present (1) or may be None (0)
# and is not just for signal specification fields
def checkitemtype(item, field, allowedtypes, channels=None):
# Checking the list
if channels is not None:
# First make sure the item is a list
if not isinstance(item, list):
raise TypeError("Field: '"+field+"' must be a list")
# Expand the channel specification so that the field must be present for every channel
if channels == 'all':
channels = [1]*len(item)
# Expand to allow any channel to be None
if channels == 'none':
channels = [0]*len(item)
for ch in range(0, len(channels)):
mustexist=channels[ch]
# The field must exist for the channel
if mustexist:
if not isinstance(item[ch], allowedtypes):
raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be one of the following types:", allowedtypes)
# The field may be None for the channel
else:
if not isinstance(item[ch], allowedtypes) and item[ch] is not None:
raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be a 'None', or one of the following types:", allowedtypes)
# Single scalar to check
else:
if not isinstance(item, allowedtypes):
raise TypeError("Field: '"+field+"' must be one of the following types:", allowedtypes)
class Record(BaseRecord, _headers.HeadersMixin, _signals.SignalsMixin):
"""
The class representing WFDB headers, and single segment WFDB records.
Record objects can be created using the constructor, by reading a WFDB header
with 'rdheader', or a WFDB record (header and associated dat files) with 'rdsamp'
or 'srdsamp'.
The attributes of the Record object give information about the record as specified
by https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the d_signals and p_signals attributes store the digital and physical
signals of WFDB records with at least one channel.
Constructor function:
def __init__(self, p_signals=None, d_signals=None,
recordname=None, nsig=None,
fs=None, counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
filename=None, fmt=None, sampsperframe=None,
skew=None, byteoffset=None, adcgain=None,
baseline=None, units=None, adcres=None,
adczero=None, initvalue=None, checksum=None,
blocksize=None, signame=None, comments=None)
Example Usage:
import wfdb
record = wfdb.Record(recordname='r1', fs=250, nsig=2, siglen=1000, filename=['r1.dat','r1.dat'])
"""
# Constructor
def __init__(self, p_signals=None, d_signals=None,
e_p_signals=None, e_d_signals=None,
recordname=None, nsig=None,
fs=None, counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
filename=None, fmt=None, sampsperframe=None,
skew=None, byteoffset=None, adcgain=None,
baseline=None, units=None, adcres=None,
adczero=None, initvalue=None, checksum=None,
blocksize=None, signame=None, comments=None):
# Note the lack of 'nseg' field. Single segment records cannot have this field. Even nseg = 1 makes
# the header a multi-segment header.
super(Record, self).__init__(recordname, nsig,
fs, counterfreq, basecounter, siglen,
basetime, basedate, comments, signame)
self.p_signals = p_signals
self.d_signals = d_signals
self.e_p_signals = e_p_signals
self.e_d_signals = e_d_signals
self.filename=filename
self.fmt=fmt
self.sampsperframe=sampsperframe
self.skew=skew
self.byteoffset=byteoffset
self.adcgain=adcgain
self.baseline=baseline
self.units=units
self.adcres=adcres
self.adczero=adczero
self.initvalue=initvalue
self.checksum=checksum
self.blocksize=blocksize
# Equal comparison operator for objects of this type
def __eq__(self, other):
att1 = self.__dict__
att2 = other.__dict__
if set(att1.keys()) != set(att2.keys()):
return False
for k in att1.keys():
v1 = att1[k]
v2 = att2[k]
if type(v1) != type(v2):
return False
if type(v1) == np.ndarray:
if not np.array_equal(v1, v2):
return False
else:
if v1 != v2:
return False
return True
# Write a wfdb header file and associated dat files if any.
# Uses d_signals (expanded=False) or e_d_signals to write the samples
def wrsamp(self, expanded=False):
# Perform field validity and cohesion checks, and write the header file.
self.wrheader()
if self.nsig>0:
# Perform signal validity and cohesion checks, and write the associated dat files.
self.wrdats(expanded)
# Arrange/edit object fields to reflect user channel and/or signal range input
# Account for case when signals are expanded
def arrangefields(self, channels, expanded=False):
# Rearrange signal specification fields
for field in _headers.sigfieldspecs:
item = getattr(self, field)
setattr(self, field, [item[c] for c in channels])
# Expanded signals - multiple samples per frame.
if expanded:
# Checksum and initvalue to be updated if present
# unless the whole signal length was input
if self.siglen != int(len(self.e_d_signals[0])/self.sampsperframe[0]):
self.checksum = self.calc_checksum(expanded)
self.initvalue = [s[0] for s in self.e_d_signals]
self.nsig = len(channels)
self.siglen = int(len(self.e_d_signals[0])/self.sampsperframe[0])
# MxN numpy array d_signals
else:
# Checksum and initvalue to be updated if present
# unless the whole signal length was input
if self.siglen != self.d_signals.shape[0]:
if self.checksum is not None:
self.checksum = self.calc_checksum()
if self.initvalue is not None:
ival = list(self.d_signals[0, :])
self.initvalue = [int(i) for i in ival]
# Update record specification parameters
# Important that these get updated after^^
self.nsig = len(channels)
self.siglen = self.d_signals.shape[0]
# Class for multi segment WFDB records.
class MultiRecord(BaseRecord, _headers.MultiHeadersMixin):
"""
The class representing multi-segment WFDB records.
MultiRecord objects can be created using the constructor, or by reading a multi-segment
WFDB record using 'rdsamp' with the 'm2s' (multi to single) input parameter set to False.
The attributes of the MultiRecord object give information about the entire record as specified
by https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the 'segments' parameter is a list of Record objects representing each
individual segment, or 'None' representing empty segments, of the entire multi-segment record.
Notably, this class has no attribute representing the signals as a whole. The 'multi_to_single'
instance method can be called on MultiRecord objects to return a single segment representation
of the record as a Record object. The resulting Record object will have its 'p_signals' field set.
Constructor function:
def __init__(self, segments=None, layout=None,
recordname=None, nsig=None, fs=None,
counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
segname=None, seglen=None, comments=None,
signame=None, sigsegments=None)
Example Usage:
import wfdb
recordM = wfdb.MultiRecord(recordname='rm', fs=50, nsig=8, siglen=9999,
segname=['rm_1', '~', 'rm_2'], seglen=[800, 200, 900])
recordL = wfdb.rdsamp('s00001-2896-10-10-00-31', m2s = False)
recordL = recordL.multi_to_single(returnres=64)
"""
# Constructor
def __init__(self, segments=None, layout=None,
recordname=None, nsig=None, fs=None,
counterfreq=None, basecounter=None,
siglen=None, basetime=None, basedate=None,
segname=None, seglen=None, comments=None,
signame=None, sigsegments=None):
super(MultiRecord, self).__init__(recordname, nsig,
fs, counterfreq, basecounter, siglen,
basetime, basedate, comments, signame)
self.layout = layout
self.segments = segments
self.segname = segname
self.seglen = seglen
self.sigsegments=sigsegments
# Write a multi-segment header, along with headers and dat files for all segments
def wrsamp(self):
# Perform field validity and cohesion checks, and write the header file.
self.wrheader()
# Perform record validity and cohesion checks, and write the associated segments.
for seg in self.segments:
seg.wrsamp()
# Check the cohesion of the segments field with other fields used to write the record
def checksegmentcohesion(self):
# Check that nseg is equal to the length of the segments field
if self.nseg != len(self.segments):
raise ValueError("Length of segments must match the 'nseg' field")
totalsiglen = 0
for i in range(0, self.nseg):
s = self.segments[i]
# If segment 0 is a layout specification record, check that its file names are all == '~'
if i==0 and self.seglen[0] == 0:
for filename in s.filename:
if filename != '~':
raise ValueError("Layout specification records must have all filenames named '~'")
# Check that sampling frequencies all match the one in the master header
if s.fs != self.fs:
raise ValueError("The 'fs' in each segment must match the overall record's 'fs'")
# Check the signal length of the segment against the corresponding seglen field
if s.siglen != self.seglen[i]:
raise ValueError('The signal length of segment '+str(i)+' does not match the corresponding segment length')
totalsiglen = totalsiglen + getattr(s, 'siglen')
# No need to check the sum of siglens from each segment object against siglen
# Already effectively done it when checking sum(seglen) against siglen
# Determine the segments and the samples
# within each segment that have to be read in a
# multi-segment record. Called during rdsamp.
def requiredsegments(self, sampfrom, sampto, channels):
# The starting segment with actual samples
if self.layout == 'Fixed':
startseg = 0
else:
startseg = 1
# Cumulative sum of segment lengths (ignoring layout segment)
cumsumlengths = list(np.cumsum(self.seglen[startseg:]))
# Get first segment
readsegs = [[sampfrom < cs for cs in cumsumlengths].index(True)]
# Get final segment
if sampto == cumsumlengths[len(cumsumlengths) - 1]:
readsegs.append(len(cumsumlengths) - 1)
else:
readsegs.append([sampto <= cs for cs in cumsumlengths].index(True))
# Add 1 for variable layout records
readsegs = list(np.add(readsegs,startseg))
# Obtain the sampfrom and sampto to read for each segment
if readsegs[1] == readsegs[0]:
# Only one segment to read
readsegs = [readsegs[0]]
# The segment's first sample number relative to the entire record
segstartsamp = sum(self.seglen[0:readsegs[0]])
readsamps = [[sampfrom-segstartsamp, sampto-segstartsamp]]
else:
# More than one segment to read
readsegs = list(range(readsegs[0], readsegs[1]+1))
readsamps = [[0, self.seglen[s]] for s in readsegs]
# Starting sample for first segment.
readsamps[0][0] = sampfrom - ([0] + cumsumlengths)[readsegs[0]-startseg]
# End sample for last segment
readsamps[-1][1] = sampto - ([0] + cumsumlengths)[readsegs[-1]-startseg]
return (readsegs, readsamps)
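# Worked example (a hypothetical fixed-layout record): with seglen == [500, 500, 500],
# sampfrom=600 and sampto=1200, cumsumlengths == [500, 1000, 1500], so segments 1 and 2
# are read and the returned per-segment sample ranges are [[100, 500], [0, 200]].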
# Get the channel numbers to be read from each segment
def requiredsignals(self, readsegs, channels, dirname, pbdir):
# Fixed layout. All channels are the same.
if self.layout == 'Fixed':
# Should we bother here with skipping empty segments?
# They won't be read anyway.
readsigs = [channels]*len(readsegs)
# Variable layout: figure out channels by matching record names
else:
readsigs = []
# The overall layout signal names
l_signames = self.segments[0].signame
# The wanted signals
w_signames = [l_signames[c] for c in channels]
# For each segment ...
for i in range(0, len(readsegs)):
# Skip empty segments
if self.segname[readsegs[i]] == '~':
readsigs.append(None)
else:
# Get the signal names of the current segment
s_signames = rdheader(os.path.join(dirname, self.segname[readsegs[i]]), pbdir = pbdir).signame
readsigs.append(wanted_siginds(w_signames, s_signames))
return readsigs
# Arrange/edit object fields to reflect user channel and/or signal range input
def arrangefields(self, readsegs, segranges, channels):
# Update seglen values for relevant segments
for i in range(0, len(readsegs)):
self.seglen[readsegs[i]] = segranges[i][1] - segranges[i][0]
# Update record specification parameters
self.nsig = len(channels)
self.siglen = sum([sr[1]-sr[0] for sr in segranges])
# Get rid of the segments and segment line parameters
# outside the desired segment range
if self.layout == 'Fixed':
self.segments = self.segments[readsegs[0]:readsegs[-1]+1]
self.segname = self.segname[readsegs[0]:readsegs[-1]+1]
self.seglen = self.seglen[readsegs[0]:readsegs[-1]+1]
else:
# Keep the layout specifier segment
self.segments = [self.segments[0]] + self.segments[readsegs[0]:readsegs[-1]+1]
self.segname = [self.segname[0]] + self.segname[readsegs[0]:readsegs[-1]+1]
self.seglen = [self.seglen[0]] + self.seglen[readsegs[0]:readsegs[-1]+1]
# Update number of segments
self.nseg = len(self.segments)
# Convert a MultiRecord object to a Record object
def multi_to_single(self, returnres):
# The fields to transfer to the new object
fields = self.__dict__.copy()
# Remove multirecord fields
del(fields['segments'])
del(fields['segname'])
del(fields['seglen'])
del(fields['nseg'])
# The output physical signals
if returnres == 64:
floatdtype = 'float64'
elif returnres == 32:
floatdtype = 'float32'
else:
floatdtype = 'float16'
p_signals = np.zeros([self.siglen, self.nsig], dtype=floatdtype)
# Get the physical samples from each segment
# Start and end samples in the overall array
# to place the segment samples into
startsamps = [0] + list(np.cumsum(self.seglen)[0:-1])
endsamps = list(np.cumsum(self.seglen))
if self.layout == 'Fixed':
# Get the signal names and units from the first segment
fields['signame'] = self.segments[0].signame
fields['units'] = self.segments[0].units
for i in range(self.nseg):
p_signals[startsamps[i]:endsamps[i],:] = self.segments[i].p_signals
# For variable layout, have to get channels by name
else:
# Get the signal names from the layout segment
fields['signame'] = self.segments[0].signame
fields['units'] = self.segments[0].units
for i in range(1, self.nseg):
seg = self.segments[i]
# Empty segment
if seg is None:
p_signals[startsamps[i]:endsamps[i],:] = np.nan
# Non-empty segment
else:
# Figure out if there are any channels wanted and
# the output channels they are to be stored in
inchannels = []
outchannels = []
for s in fields['signame']:
if s in seg.signame:
inchannels.append(seg.signame.index(s))
outchannels.append(fields['signame'].index(s))
# Segment contains no wanted channels. Fill with nans.
if inchannels == []:
p_signals[startsamps[i]:endsamps[i],:] = np.nan
# Segment contains wanted channel(s). Transfer samples.
else:
# This statement is necessary in case this function is not called
# directly from rdsamp with m2s=True.
if not hasattr(seg, 'p_signals'):
seg.p_signals = seg.dac(returnres=returnres)
for ch in range(0, fields['nsig']):
if ch not in outchannels:
p_signals[startsamps[i]:endsamps[i],ch] = np.nan
else:
p_signals[startsamps[i]:endsamps[i],ch] = seg.p_signals[:, inchannels[outchannels.index(ch)]]
# Create the single segment Record object and set attributes
record = Record()
for field in fields:
setattr(record, field, fields[field])
record.p_signals = p_signals
return record
#------------------- Reading Records -------------------#
# Read a WFDB single or multi segment record. Return a Record or MultiRecord object
def rdsamp(recordname, sampfrom=0, sampto=None, channels = None, physical = True, pbdir = None,
m2s = True, smoothframes = True, ignoreskew=False, returnres=64):
"""Read a WFDB record and return the signal and record descriptors as attributes in a
Record or MultiRecord object.
Usage:
record = rdsamp(recordname, sampfrom=0, sampto=None, channels=None, physical=True, pbdir = None,
m2s=True, smoothframes = True, ignoreskew=False)
Input arguments:
- recordname (required): The name of the WFDB record to be read (without any file extensions).
If the argument contains any path delimiter characters, the argument will be interpreted as
PATH/baserecord and the data files will be searched for in the local path.
- sampfrom (default=0): The starting sample number to read for each channel.
- sampto (default=None): The sample number at which to stop reading for each channel.
- channels (default=all): Indices specifying the channel to be returned.
- physical (default=True): Flag that specifies whether to return signals in physical units in
the p_signals field (True), or digital units in the d_signals field (False).
- pbdir (default=None): Option used to stream data from Physiobank. The Physiobank database
directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
- m2s (default=True): Flag used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert it into and return a wfdb
Record object (True).
- smoothframes (default=True): Flag used when reading records with signals having multiple
samples per frame. Specifies whether to smooth the samples in signals with more than
one sample per frame and return an mxn uniform numpy array as the d_signals or p_signals
field (True), or to return a list of 1d numpy arrays containing every expanded sample as
the e_d_signals or e_p_signals field (False).
- ignoreskew (default=False): Flag used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the output variable (False), or
to ignore the skew field and load in all values contained in the dat files unaligned (True).
- returnres (default=64): The numpy array dtype of the returned signals. Options are: 64, 32,
16, and 8, where the value represents the numpy int or float dtype. Note that the value
cannot be 8 when physical is True since there is no float8 format.
Output argument:
- record: The wfdb Record or MultiRecord object representing the contents of the record read.
Note: If a signal range or channel selection is specified when calling this function, the
resulting attributes of the returned object will be set to reflect the section
of the record that is actually read, rather than necessarily what is in the header file.
For example, if channels = [0, 1, 2] is specified when reading a 12 channel record, the
'nsig' attribute will be 3, not 12.
Note: The 'srdsamp' function exists as a simple alternative to 'rdsamp' for the most common
purpose of extracting the physical signals and a few important descriptor fields.
'srdsamp' returns two arguments: the physical signals array, and a dictionary of a
few select fields, a subset of the original wfdb Record attributes.
Example Usage:
import wfdb
ecgrecord = wfdb.rdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
"""
dirname, baserecordname = os.path.split(recordname)
# Read the header fields into the appropriate record object
record = rdheader(recordname, pbdir = pbdir, rdsegments = False)
# Set defaults for sampto and channels input variables
if sampto is None:
sampto = record.siglen
if channels is None:
channels = list(range(record.nsig))
# Ensure that input fields are valid for the record
record.checkreadinputs(sampfrom, sampto, channels, physical, m2s, smoothframes, returnres)
# A single segment record
if isinstance(record, Record):
# Only 1 sample/frame, or frames are smoothed. Return uniform numpy array
if smoothframes or max([record.sampsperframe[c] for c in channels])==1:
# Read signals from the associated dat files that contain wanted channels
record.d_signals = _signals.rdsegment(record.filename, dirname, pbdir, record.nsig, record.fmt, record.siglen,
record.byteoffset, record.sampsperframe, record.skew,
sampfrom, sampto, channels, smoothframes, ignoreskew)
# Arrange/edit the object fields to reflect user channel and/or signal range input
record.arrangefields(channels, expanded=False)
if physical is True:
# Perform inplace dac to get physical signal
record.dac(expanded=False, returnres=returnres, inplace=True)
# Return each sample of the signals with multiple samples per frame
else:
record.e_d_signals = _signals.rdsegment(record.filename, dirname, pbdir, record.nsig, record.fmt, record.siglen,
record.byteoffset, record.sampsperframe, record.skew,
sampfrom, sampto, channels, smoothframes, ignoreskew)
# Arrange/edit the object fields to reflect user channel and/or signal range input
record.arrangefields(channels, expanded=True)
if physical is True:
# Perform dac to get physical signal
record.dac(expanded=True, returnres=returnres, inplace=True)
# A multi segment record
# We can make another rdsamp function (called rdsamp_segment) to call
# for individual segments to deal with the skews.
else:
# Strategy:
# 1. Read the required segments and store them in
# Record objects.
# 2. Update the parameters of the objects to reflect
# the state of the sections read.
# 3. Update the parameters of the overall MultiRecord
# object to reflect the state of the individual segments.
# 4. If specified, convert the MultiRecord object
# into a single Record object.
# Segments field is a list of Record objects
# Empty segments store None.
record.segments = [None]*record.nseg
# Variable layout
if record.seglen[0] == 0:
record.layout = 'Variable'
# Read the layout specification header
record.segments[0] = rdheader(os.path.join(dirname, record.segname[0]), pbdir=pbdir)
# Fixed layout
else:
record.layout = 'Fixed'
# The segment numbers and samples within each segment to read.
readsegs, segranges = record.requiredsegments(sampfrom, sampto, channels)
# The signals within each segment to read
segsigs = record.requiredsignals(readsegs, channels, dirname, pbdir)
# Read the desired samples in the relevant segments
for i in range(len(readsegs)):
segnum = readsegs[i]
# Empty segment or segment with no relevant channels
if record.segname[segnum] == '~' or segsigs[i] is None:
record.segments[segnum] = None
else:
record.segments[segnum] = rdsamp(os.path.join(dirname, record.segname[segnum]),
sampfrom = segranges[i][0], sampto = segranges[i][1],
channels = segsigs[i], physical = True, pbdir=pbdir)
# Arrange the fields of the overall object to reflect user input
record.arrangefields(readsegs, segranges, channels)
# Convert object into a single segment Record object
if m2s:
record = record.multi_to_single(returnres=returnres)
# Perform dtype conversion if necessary
if isinstance(record, Record) and record.nsig>0:
record.convert_dtype(physical, returnres, smoothframes)
return record
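# Editor's illustration (not part of the original module): a sketch of how the main
# rdsamp options combine. The record names below are only assumed examples of local
# or Physiobank records.
# import wfdb
# # Digital (unconverted) samples of the first two channels, keeping every sample per frame:
# rec = wfdb.rdsamp('sampledata/100', channels=[0, 1], physical=False, smoothframes=False)
# # Keep a multi-segment record as a MultiRecord instead of converting it to a Record:
# mrec = wfdb.rdsamp('s00001-2896-10-10-00-31', m2s=False)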
# Read a WFDB header. Return a Record object or MultiRecord object
def rdheader(recordname, pbdir = None, rdsegments = False):
"""Read a WFDB header file and return the record descriptors as attributes in a Record object
Usage:
record = rdheader(recordname, pbdir = None, rdsegments = False)
Input arguments:
- recordname (required): The name of the WFDB record to be read (without any file extensions).
If the argument contains any path delimiter characters, the argument will be interpreted as
PATH/baserecord and the header file will be searched for in the local path.
- pbdir (default=None): Option used to stream data from Physiobank. The Physiobank database
directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
- rdsegments (default=False): Boolean flag used when reading multi-segment headers. If True,
segment headers will also be read (into the record object's 'segments' field).
Output argument:
- record: The wfdb Record or MultiRecord object representing the contents of the header read.
Example Usage:
import wfdb
ecgrecord = wfdb.rdheader('sampledata/test01_00s')
"""
# Read the header file. Separate comment and non-comment lines
headerlines, commentlines = _headers.getheaderlines(recordname, pbdir)
# Get fields from record line
d_rec = _headers.read_rec_line(headerlines[0])
# Processing according to whether the header is single or multi segment
# Single segment header - Process signal specification lines
if d_rec['nseg'] is None:
# Create a single-segment WFDB record object
record = Record()
# There is at least one channel
if len(headerlines)>1:
# Read the fields from the signal lines
d_sig = _headers.read_sig_lines(headerlines[1:])
# Set the object's signal line fields
for field in _headers.sigfieldspecs:
setattr(record, field, d_sig[field])
# Set the object's record line fields
for field in _headers.recfieldspecs:
if field == 'nseg':
continue
setattr(record, field, d_rec[field])
# Multi segment header - Process segment specification lines
else:
# Create a multi-segment WFDB record object
record = MultiRecord()
# Read the fields from the segment lines
d_seg = _headers.read_seg_lines(headerlines[1:])
# Set the object's segment line fields
for field in _headers.segfieldspecs:
setattr(record, field, d_seg[field])
# Set the objects' record line fields
for field in _headers.recfieldspecs:
setattr(record, field, d_rec[field])
# Determine whether the record is fixed or variable
if record.seglen[0] == 0:
record.layout = 'Variable'
else:
record.layout = 'Fixed'
# If specified, read the segment headers
if rdsegments:
record.segments = []
# Get the base record name (could be empty)
dirname = os.path.split(recordname)[0]
for s in record.segname:
if s == '~':
record.segments.append(None)
else:
record.segments.append(rdheader(os.path.join(dirname,s), pbdir))
# Fill in the signame attribute
record.signame = record.getsignames()
# Fill in the sigsegments attribute
record.sigsegments = record.getsigsegments()
# Set the comments field
record.comments = []
for line in commentlines:
record.comments.append(line.strip(' \t#'))
return record
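# Editor's illustration (assumed local record name): read only the header and inspect
# a few descriptor fields without touching the dat files.
# record = rdheader('sampledata/100')
# print(record.nsig, record.fs, record.signame)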
# Given some wanted signal names, and the signal names contained
# in a record, return the indices of the record channels that intersect.
# Note that the wanted signal names are already in the order specified by the user's input channels.
def wanted_siginds(wanted_signames, record_signames):
contained_signals = [s for s in wanted_signames if s in record_signames]
if contained_signals == []:
return None
else:
return [record_signames.index(s) for s in contained_signals]
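# Editor's example: with wanted_signames = ['II', 'V1'] and record_signames = ['I', 'II', 'V1']
# this returns [1, 2]; if none of the wanted names are present it returns None.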
# A simple version of rdsamp for ease of use
# Return the physical signals and a few essential fields
def srdsamp(recordname, sampfrom=0, sampto=None, channels = None, pbdir = None):
"""Read a WFDB record and return the physical signal and a few important descriptor fields
Usage:
signals, fields = srdsamp(recordname, sampfrom=0, sampto=None, channels=None, pbdir=None)
Input arguments:
- recordname (required): The name of the WFDB record to be read (without any file extensions).
If the argument contains any path delimiter characters, the argument will be interpreted as
PATH/baserecord and the data files will be searched for in the local path.
- sampfrom (default=0): The starting sample number to read for each channel.
- sampto (default=None): The sample number at which to stop reading for each channel.
- channels (default=all): Indices specifying the channels to be returned.
Output arguments:
- signals: A 2d numpy array storing the physical signals from the record.
- fields: A dictionary specifying several key attributes of the read record:
- fs: The sampling frequency of the record
- units: The units for each channel
- signame: The signal name for each channel
- comments: Any comments written in the header
Note: If a signal range or channel selection is specified when calling this function,
the resulting attributes of the returned object will be set to reflect the section
of the record that is actually read, rather than necessarily what is in the header file.
For example, if channels = [0, 1, 2] is specified when reading a 12 channel record, the
'nsig' attribute will be 3, not 12.
Note: The 'rdsamp' function is the base function upon which this one is built. It returns
all attributes present, along with the signals, as attributes in a wfdb.Record object.
The function, along with the returned data type, has more options than 'srdsamp' for
users who wish to more directly manipulate WFDB files.
Example Usage:
import wfdb
sig, fields = wfdb.srdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
"""
record = rdsamp(recordname, sampfrom, sampto, channels, True, pbdir, True)
signals = record.p_signals
fields = {}
for field in ['fs','units','signame', 'comments']:
fields[field] = getattr(record, field)
return signals, fields
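# Editor's illustration (assumed record name): the returned dictionary carries only the
# most commonly needed descriptors.
# sig, fields = srdsamp('sampledata/test01_00s', channels=[1, 3])
# print(sig.shape)                        # (number of samples, number of channels)
# print(fields['fs'], fields['signame'])  # sampling frequency and channel names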
#------------------- /Reading Records -------------------#
# Function for writing single segment records
def wrsamp(recordname, fs, units, signames, p_signals=None, d_signals=None,
fmt=None, gain=None, baseline=None, comments=None, basetime=None,
basedate=None):
"""Write a single segment WFDB record, creating a WFDB header file and any associated dat files.
Usage:
wrsamp(recordname, fs, units, signames, p_signals = None, d_signals=None,
fmt = None, gain = None, baseline = None, comments = None, basetime = None, basedate = None)
Input arguments:
- recordname (required): The string name of the WFDB record to be written (without any file extensions).
- fs (required): The numerical sampling frequency of the record.
- units (required): A list of strings giving the units of each signal channel.
- signames (required): A list of strings giving the signal name of each signal channel.
- p_signals (default=None): An MxN 2d numpy array, where M is the signal length. Gives the physical signal
values intended to be written. Either p_signals or d_signals must be set, but not both. If p_signals
is set, this method will use it to perform analogue-digital conversion, writing the resultant digital
values to the dat file(s). If fmt is set, gain and baseline must be set or unset together. If fmt is
unset, gain and baseline must both be unset.
- d_signals (default=None): An MxN 2d numpy array, where M is the signal length. Gives the digital signal
values intended to be directly written to the dat file(s). The dtype must be an integer type. Either
p_signals or d_signals must be set, but not both. In addition, if d_signals is set, fmt, gain and baseline
must also all be set.
- fmt (default=None): A list of strings giving the WFDB format of each file used to store each channel.
Accepted formats are: "80","212","16","24", and "32". There are other WFDB formats but this library
will not write (though it will read) those file types.
- gain (default=None): A list of integers specifying the ADC gain.
- baseline (default=None): A list of integers specifying the digital baseline.
- comments (default=None): A list of string comments to be written to the header file.
- basetime (default=None): A string of the record's start time in 24h HH:MM:SS(.ms) format.
- basedate (default=None): A string of the record's start date in DD/MM/YYYY format.
Note: This gateway function was written to enable a simple way to write WFDB record files using
the most frequently used parameters. Therefore not all WFDB fields can be set via this function.
For more control over attributes, create a wfdb.Record object, manually set its attributes, and
call its wrsamp() instance method. If you choose this more advanced method, see also the setdefaults,
set_d_features, and set_p_features instance methods to help populate attributes.
Example Usage (with the most common scenario of input parameters):
import wfdb
# Read part of a record from Physiobank
sig, fields = wfdb.srdsamp('a103l', sampfrom = 50000, channels = [0,1], pbdir = 'challenge/2015/training')
# Write a local WFDB record (manually inserting fields)
wfdb.wrsamp('ecgrecord', fs = 250, units = ['mV', 'mV'], signames = ['I', 'II'], p_signals = sig, fmt = ['16', '16'])
"""
# Check input field combinations
if p_signals is not None and d_signals is not None:
raise Exception('Must only give one of the inputs: p_signals or d_signals')
if d_signals is not None:
if fmt is None or gain is None or baseline is None:
raise Exception("When using d_signals, must also specify 'fmt', 'gain', and 'baseline' fields.")
# Depending on whether d_signals or p_signals was used, set other required features.
if p_signals is not None:
# Create the Record object
record = Record(recordname=recordname, p_signals=p_signals, fs=fs,
fmt=fmt, units=units, signame=signames, adcgain = gain,
baseline=baseline, comments=comments, basetime=basetime,
basedate=basedate)
# Compute optimal fields to store the digital signal, carry out adc, and set the fields.
record.set_d_features(do_adc = 1)
else:
# Create the Record object
record = Record(recordname=recordname, d_signals=d_signals, fs=fs,
fmt=fmt, units=units, signame = signames, adcgain = gain,
baseline=baseline, comments=comments, basetime=basetime,
basedate=basedate)
# Use d_signals to set the fields directly
record.set_d_features()
# Set default values of any missing field dependencies
record.setdefaults()
# Write the record files - header and associated dat
record.wrsamp()
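# Editor's sketch of the digital-input branch (all values hypothetical): when passing
# d_signals directly, fmt, gain and baseline must all be supplied, e.g.
# wrsamp('digrecord', fs=360, units=['mV'], signames=['MLII'],
#        d_signals=d, fmt=['212'], gain=[200], baseline=[1024])
# where d is an integer-dtype Mx1 numpy array.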
# Time string parser for WFDB header - H(H):M(M):S(S(.sss)) format.
def parsetimestring(timestring):
times = re.findall(r"(?P<hours>\d{1,2}):(?P<minutes>\d{1,2}):(?P<seconds>\d{1,2}[.\d+]*)", timestring)
if not times:
raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
else:
hours, minutes, seconds = times[0]
if not hours or not minutes or not seconds:
raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
hours = int(hours)
minutes = int(minutes)
seconds = float(seconds)
if hours > 23:
raise ValueError('hours must be < 24')
elif hours<0:
raise ValueError('hours must be positive')
if minutes>59:
raise ValueError('minutes must be < 60')
elif minutes<0:
raise ValueError('minutes must be positive')
if seconds>59:
raise ValueError('seconds must be < 60')
elif seconds<0:
raise ValueError('seconds must be positive')
return (hours, minutes, seconds)
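# Editor's example: parsetimestring('12:05:30.5') returns (12, 5, 30.5); strings that
# do not match the H(H):M(M):S(S(.sss)) pattern raise ValueError.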
# Date string parser for WFDB header - DD/MM/YYYY
def parsedatestring(datestring):
dates = re.findall(r"(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4})", datestring)
if not dates:
raise ValueError("Invalid date string. Acceptable format is: 'DD/MM/YYYY'")
else:
day, month, year = dates[0]
day = int(day)
month = int(month)
year = int(year)
if year<1:
raise ValueError('year must be positive')
if month<1 or month>12:
raise ValueError('month must be between 1 and 12')
if day not in range(1, monthrange(year, month)[1]+1):
raise ValueError('day does not exist for specified year and month')
return (day, month, year)
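# Editor's example: parsedatestring('25/12/2016') returns (25, 12, 2016); note the
# DD/MM/YYYY ordering.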
# Returns the unique elements in a list in the order that they appear.
# Also returns the indices of the original list that correspond to each output element.
def orderedsetlist(fulllist):
uniquelist = []
original_inds = {}
for i in range(0, len(fulllist)):
item = fulllist[i]
# new item
if item not in uniquelist:
uniquelist.append(item)
original_inds[item] = [i]
# previously seen item
else:
original_inds[item].append(i)
return uniquelist, original_inds
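# Editor's example: orderedsetlist(['d1.dat', 'd1.dat', 'd2.dat']) returns
# (['d1.dat', 'd2.dat'], {'d1.dat': [0, 1], 'd2.dat': [2]}).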
# Returns elements in a list without consecutive repeated values.
def orderednoconseclist(fulllist):
noconseclist = [fulllist[0]]
if len(fulllist) == 1:
return noconseclist
for i in fulllist:
if i!= noconseclist[-1]:
noconseclist.append(i)
return noconseclist
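# Editor's example: orderednoconseclist([0, 0, 1, 1, 0]) returns [0, 1, 0].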
# *These file-downloading gateway functions rely on the Record/MultiRecord objects.
# They are placed here rather than in downloads.py in order to avoid circular imports
# Download WFDB files from a physiobank database
# This function only targets databases with WFDB records (EDF and MIT format).
# If the database doesn't have a 'RECORDS' file, it will fail.
def dldatabase(pbdb, dlbasedir, records = 'all', annotators = 'all' , keepsubdirs = True, overwrite = False):
"""Download WFDB record (and optionally annotation) files from a Physiobank database. The database
must contain a 'RECORDS' file in its base directory which lists its WFDB records.
Usage:
dldatabase(pbdb, dlbasedir, records = 'all', annotators = 'all' , keepsubdirs = True, overwrite = False)
Input arguments:
- pbdb (required): The Physiobank database directory to download.
eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
- dlbasedir (required): The full local directory path in which to download the files.
- records (default='all'): Specifier of the WFDB records to download. Is either a list of strings
which each specify a record, or 'all' to download all records listed in the database's RECORDS file.
eg. records = ['test01_00s', 'test02_45s'] for database https://physionet.org/physiobank/database/macecgdb/
- annotators (default='all'): Specifier of the WFDB annotation file types to download along with
the record files. Is either None to skip downloading any annotations, 'all' to download all
annotation types as specified by the ANNOTATORS file, or a list of strings which each specify an
annotation extension.
eg. annotators = ['anI'] for database https://physionet.org/physiobank/database/prcp/
- keepsubdirs (default=True): Whether to keep the relative subdirectories of downloaded files
as they are organized in Physiobank (True), or to download all files into the same base directory (False).
- overwrite (default=False): If set to True, all files will be redownloaded regardless. If set to False,
existing files with the same name and relative subdirectory will be checked. If the local file is
the same size as the online file, the download is skipped. If the local file is larger, it will be deleted
and the file will be redownloaded. If the local file is smaller, the file will be assumed to be
partially downloaded and the remaining bytes will be downloaded and appended.
Example Usage:
import wfdb
wfdb.dldatabase('ahadb', os.getcwd())
"""
# Full url physiobank database
dburl = posixpath.join(downloads.dbindexurl, pbdb)
# Check if the database is valid
r = requests.get(dburl)
r.raise_for_status()
# Get the list of records
recordlist = downloads.getrecordlist(dburl, records)
# Get the annotator extensions
annotators = downloads.getannotators(dburl, annotators)
# All files to download (relative to the database's home directory)
allfiles = []
for rec in recordlist:
# Check whether each record is in MIT or EDF format
if rec.endswith('.edf'):
allfiles.append(rec)
else:
# If MIT format, have to figure out all associated files
allfiles.append(rec+'.hea')
dirname, baserecname = os.path.split(rec)
record = rdheader(baserecname, pbdir = posixpath.join(pbdb, dirname))
# Single segment record
if isinstance(record, Record):
# Add all dat files of the segment
for file in record.filename:
allfiles.append(posixpath.join(dirname, file))
# Multi segment record
else:
for seg in record.segname:
# Skip empty segments
if seg == '~':
continue
# Add the header
allfiles.append(posixpath.join(dirname, seg+'.hea'))
# Layout specifier has no dat files
if seg.endswith('_layout'):
continue
# Add all dat files of the segment
recseg = rdheader(seg, pbdir = posixpath.join(pbdb, dirname))
for file in recseg.filename:
allfiles.append(posixpath.join(dirname, file))
# check whether the record has any requested annotation files
if annotators is not None:
for a in annotators:
annfile = rec+'.'+a
url = posixpath.join(downloads.dbindexurl, pbdb, annfile)
rh = requests.head(url)
if rh.status_code != 404:
allfiles.append(annfile)
dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], pbdb, dlbasedir, keepsubdirs, overwrite) for file in allfiles]
# Make any required local directories
downloads.makelocaldirs(dlbasedir, dlinputs, keepsubdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(downloads.dlpbfile, dlinputs)
print('Finished downloading files')
return
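# Editor's illustration (record and annotator names are examples from the MIT-BIH
# Arrhythmia Database): download two records plus their 'atr' annotation files.
# dldatabase('mitdb', os.getcwd(), records=['100', '101'], annotators=['atr'])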
# Download specific files from a physiobank database
def dldatabasefiles(pbdb, dlbasedir, files, keepsubdirs = True, overwrite = False):
"""Download specified files from a Physiobank database.
Usage:
dldatabasefiles(pbdb, dlbasedir, files, keepsubdirs = True, overwrite = False)
Input arguments:
- pbdb (required): The Physiobank database directory to download.
eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
- dlbasedir (required): The full local directory path in which to download the files.
- files (required): A list of strings specifying the file names to download relative to the database
base directory
- keepsubdirs (default=True): Whether to keep the relative subdirectories of downloaded files
as they are organized in Physiobank (True), or to download all files into the same base directory (False).
- overwrite (default=False): If set to True, all files will be redownloaded regardless. If set to False,
existing files with the same name and relative subdirectory will be checked. If the local file is
the same size as the online file, the download is skipped. If the local file is larger, it will be deleted
and the file will be redownloaded. If the local file is smaller, the file will be assumed to be
partially downloaded and the remaining bytes will be downloaded and appended.
Example Usage:
import wfdb
wfdb.dldatabasefiles('ahadb', os.getcwd(), ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat'])
"""
# Full url physiobank database
dburl = posixpath.join(downloads.dbindexurl, pbdb)
# Check if the database is valid
r = requests.get(dburl)
r.raise_for_status()
# Construct the urls to download
dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], pbdb, dlbasedir, keepsubdirs, overwrite) for file in files]
# Make any required local directories
downloads.makelocaldirs(dlbasedir, dlinputs, keepsubdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(downloads.dlpbfile, dlinputs)
print('Finished downloading files')
return | en | 0.789103 | | 2.573053 | 3
dipper_methods.py | dirac-institute/ZTF_Boyajian | 2 | 6633037 | import numpy as np
from scipy.ndimage import minimum_filter1d
def setup_pyximport():
import pyximport
pyximport.install(reload_support=True, setup_args={'include_dirs': np.get_include()})
class cython_function():
def __init__(self, module, name):
self.module = module
self.name = name
self.function = None
self.load_function()
def load_function(self):
setup_pyximport()
self.function = getattr(__import__(self.module), self.name)
def __call__(self, *args, **kwargs):
if self.function is None:
self.load_function()
return self.function(*args, **kwargs)
def __getstate__(self):
# Don't return the module so that each node has to recompile it itself.
state = self.__dict__.copy()
state['function'] = None
return state
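# Editor's note: cython_function compiles the extension lazily on the first call and,
# because __getstate__ drops the compiled handle, each worker recompiles after
# unpickling, e.g.
# group_observations = cython_function('dipper', 'group_observations')
# use_mjd, use_mag, use_magerr = group_observations(mjd, mag, magerr)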
def detect_dippers(mjd, mag, magerr, xpos, ypos, catflags, verbose=False,
return_mjd=False, num_sequential=3):
'''
Compute a "dipper" score for a light curve.

The score is the largest deviation, in units of the per-epoch uncertainty, by which
num_sequential consecutive grouped observations all sit fainter than the median
magnitude. Returns -1. when fewer than 10 usable observations survive the quality
cuts; if return_mjd is True, the MJD of the deepest run is also returned.
'''
# moved into here for lack of better place
group_observations = cython_function('dipper', 'group_observations')
if len(mjd) == 0:
if return_mjd:
return -1., float('nan')
else:
return -1.
mjd = np.array(mjd)
order = np.argsort(mjd)
# Convert everything to numpy arrays and sort them by MJD
sort_mjd = mjd[order]
sort_mag = np.array(mag)[order]
sort_magerr = np.array(magerr)[order]
sort_xpos = np.array(xpos)[order]
sort_ypos = np.array(ypos)[order]
sort_catflags = np.array(catflags)[order]
# Mask out bad or repeated observations.
pad_width = 20
x_border = 3072
y_border = 3080
mask = (
(np.abs(sort_mjd - np.roll(sort_mjd, 1)) > 1e-5)
& (sort_xpos > pad_width)
& (sort_xpos < x_border - pad_width)
& (sort_ypos > pad_width)
& (sort_ypos < y_border - pad_width)
& (sort_catflags == 0)
# In the oct19 data, some observations have a magerr of 0 and aren't flagged.
# This causes a world of problems, so throw them out.
& (sort_magerr > 0)
# In the oct19 data, a lot of dippers are the result of bad columns...
# Unfortunately, in this version of the ZTF data we don't know which amplifier
# everything came from. To get a reasonably clean sample (with some unnecessary
# attrition), we cut any observations that are in the "bad" x ranges.
& ((sort_xpos < 24) | (sort_xpos > 31))
& ((sort_xpos < 95) | (sort_xpos > 106))
& ((sort_xpos < 328) | (sort_xpos > 333))
& ((sort_xpos < 1169) | (sort_xpos > 1177))
& ((sort_xpos < 1249) | (sort_xpos > 1257))
& ((sort_xpos < 1339) | (sort_xpos > 1349))
& ((sort_xpos < 2076) | (sort_xpos > 2100))
& ((sort_xpos < 2521) | (sort_xpos > 2537))
& ((sort_xpos < 2676) | (sort_xpos > 2682))
& ((sort_xpos < 2888) | (sort_xpos > 2895))
)
if np.sum(mask) < 10:
# Require at least 10 observations to have reasonable statistics.
if return_mjd:
return -1., float('nan')
else:
return -1.
mask_mjd = sort_mjd[mask]
mask_mag = sort_mag[mask]
mask_magerr = sort_magerr[mask]
# Unused for now, so don't bother calculating them.
# mask_xpos = sort_xpos[mask]
# mask_ypos = sort_ypos[mask]
# mask_catflags = sort_catflags[mask]
use_mjd, use_mag, use_magerr = group_observations(mask_mjd, mask_mag, mask_magerr)
# For well-measured observations, use the core standard deviation. For poorly
# measured ones, use the measured standard deviation. The core standard deviation
# should be very similar to the measured ones for stable light curves, so we
# shouldn't be adding these in quadrature. Instead, we take whichever value is
# larger.
#core_std = np.std(use_mag)
# NMAD
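# The 1.4826 factor converts the median absolute deviation to a Gaussian-equivalent standard deviation.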
core_std = 1.4826 * np.nanmedian(np.abs(use_mag - np.nanmedian(use_mag)))
use_magerr[use_magerr < core_std] = core_std
scores = (use_mag - np.median(use_mag)) / use_magerr
# Get the minimum score for a run.
filtered_scores = minimum_filter1d(scores, num_sequential, mode='constant')
max_loc = np.argmax(filtered_scores)
result = float(filtered_scores[max_loc])
max_mjd = use_mjd[max_loc]
if verbose:
print("Max mjd: ", max_mjd)
if return_mjd:
return result, max_mjd
else:
return result
def detect_dippers_row(row, band='r', *args, **kwargs):
return detect_dippers(row[f'mjd_{band}'], row[f'mag_{band}'],
row[f'magerr_{band}'], row[f'xpos_{band}'], row[f'ypos_{band}'],
row[f'catflags_{band}'], *args, **kwargs) | import numpy as np
from scipy.ndimage import minimum_filter1d
def setup_pyximport():
import pyximport
pyximport.install(reload_support=True, setup_args={'include_dirs': np.get_include()})
class cython_function():
def __init__(self, module, name):
self.module = module
self.name = name
self.function = None
self.load_function()
def load_function(self):
setup_pyximport()
self.function = getattr(__import__(self.module), self.name)
def __call__(self, *args, **kwargs):
if self.function is None:
self.load_function()
return self.function(*args, **kwargs)
def __getstate__(self):
# Don't return the module so that each node has to recompile it itself.
state = self.__dict__.copy()
state['function'] = None
return state
def detect_dippers(mjd, mag, magerr, xpos, ypos, catflags, verbose=False,
return_mjd=False, num_sequential=3):
'''
a docstring
'''
# moved into here for lack of better place
group_observations = cython_function('dipper', 'group_observations')
if len(mjd) == 0:
if return_mjd:
return -1., float('nan')
else:
return -1.
mjd = np.array(mjd)
order = np.argsort(mjd)
# Convert everything to numpy arrays and sort them by MJD
sort_mjd = mjd[order]
sort_mag = np.array(mag)[order]
sort_magerr = np.array(magerr)[order]
sort_xpos = np.array(xpos)[order]
sort_ypos = np.array(ypos)[order]
sort_catflags = np.array(catflags)[order]
# Mask out bad or repeated observations.
pad_width = 20
x_border = 3072
y_border = 3080
mask = (
(np.abs(sort_mjd - np.roll(sort_mjd, 1)) > 1e-5)
& (sort_xpos > pad_width)
& (sort_xpos < x_border - pad_width)
& (sort_ypos > pad_width)
& (sort_ypos < y_border - pad_width)
& (sort_catflags == 0)
# In the oct19 data, some observations have a magerr of 0 and aren't flagged.
# This causes a world of problems, so throw them out.
& (sort_magerr > 0)
# In the oct19 data, a lot of dippers are the result of bad columns...
# Unfortunately, in this version of the ZTF data we don't know which amplifier
# everything came from. To get a reasonably clean sample (with some unnecessary
# attrition), we cut any observations that are in the "bad" x ranges.
& ((sort_xpos < 24) | (sort_xpos > 31))
& ((sort_xpos < 95) | (sort_xpos > 106))
& ((sort_xpos < 328) | (sort_xpos > 333))
& ((sort_xpos < 1169) | (sort_xpos > 1177))
& ((sort_xpos < 1249) | (sort_xpos > 1257))
& ((sort_xpos < 1339) | (sort_xpos > 1349))
& ((sort_xpos < 2076) | (sort_xpos > 2100))
& ((sort_xpos < 2521) | (sort_xpos > 2537))
& ((sort_xpos < 2676) | (sort_xpos > 2682))
& ((sort_xpos < 2888) | (sort_xpos > 2895))
)
if np.sum(mask) < 10:
# Require at least 10 observations to have reasonable statistics.
if return_mjd:
return -1., float('nan')
else:
return -1.
mask_mjd = sort_mjd[mask]
mask_mag = sort_mag[mask]
mask_magerr = sort_magerr[mask]
# Unused for now, so don't bother calculating them.
# mask_xpos = sort_xpos[mask]
# mask_ypos = sort_ypos[mask]
# mask_catflags = sort_catflags[mask]
use_mjd, use_mag, use_magerr = group_observations(mask_mjd, mask_mag, mask_magerr)
# For well-measured observations, use the core standard deviation. For poorly
# measured ones, use the measured standard deviation. The core standard deviation
# should be very similar to the measured ones for stable light curves, so we
# shouldn't be adding these in quadrature. Instead, we take whichever value is
# larger.
#core_std = np.std(use_mag)
# NMAD
core_std = 1.4826 * np.nanmedian(np.abs(use_mag - np.nanmedian(use_mag)))
use_magerr[use_magerr < core_std] = core_std
scores = (use_mag - np.median(use_mag)) / use_magerr
# Get the minimum score for a run.
filtered_scores = minimum_filter1d(scores, num_sequential, mode='constant')
max_loc = np.argmax(filtered_scores)
result = float(filtered_scores[max_loc])
max_mjd = use_mjd[max_loc]
if verbose:
print("Max mjd: ", max_mjd)
if return_mjd:
return result, max_mjd
else:
return result
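# The wrapper below applies the detector to one table row. A hedged usage
# sketch (not in the original file), assuming a DataFrame with band-suffixed
# columns mjd_r, mag_r, magerr_r, xpos_r, ypos_r, catflags_r:
#
#   scores = df.apply(detect_dippers_row, axis=1, band='r')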
def detect_dippers_row(row, band='r', *args, **kwargs):
return detect_dippers(row[f'mjd_{band}'], row[f'mag_{band}'],
row[f'magerr_{band}'], row[f'xpos_{band}'], row[f'ypos_{band}'],
row[f'catflags_{band}'], *args, **kwargs) | en | 0.895893 | # Don't return the module so that each node has to recompile it itself. a docstring # moved into here for lack of better place # Convert everything to numpy arrays and sort them by MJD # Mask out bad or repeated observations. # In the oct19 data, some observations have a magerr of 0 and aren't flagged. # This causes a world of problems, so throw them out. # In the oct19 data, a lot of dippers are the result of bad columns... # Unfortunately, in this version of the ZTF data we don't know which amplifier # everything came from. To get a reasonably clean sample (with some unnecessary # attrition), we cut any observations that are in the "bad" x ranges. # Require at least 10 observations to have reasonable statistics. # Unused for now, so don't bother calculating them. # mask_xpos = sort_xpos[mask] # mask_ypos = sort_ypos[mask] # mask_catflags = sort_catflags[mask] # For well-measured observations, use the core standard deviation. For poorly # measured ones, use the measured standard deviation. The core standard deviation # should be very similar to the measured ones for stable light curves, so we # shouldn't be adding these in quadrature. Instead, we take whichever value is # larger. #core_std = np.std(use_mag) # NMAD # Get the minimum score for a run. | 2.332965 | 2 |
data/sampling/sample_cord-19.py | dartar/habeas-corpus | 5 | 6633038 | import csv
import pandas
import random
SAMPLE_COUNT = 100
i = 0
with open('../cord-19/CORD19_software_mentions.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file)
row_count = sum(1 for row in csv_reader)
rand_ints = random.sample(range(1, row_count), SAMPLE_COUNT)
with open('output.csv', 'w') as output:
output_writer = csv.writer(output, delimiter=',')
csv_file.seek(0)
for row in csv_reader:
if i == 0 or i in rand_ints:
output_writer.writerow(row)
i += 1
| import csv
import pandas
import random
SAMPLE_COUNT = 100
i = 0
with open('../cord-19/CORD19_software_mentions.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file)
row_count = sum(1 for row in csv_reader)
rand_ints = random.sample(range(1, row_count), SAMPLE_COUNT)
with open('output.csv', 'w') as output:
output_writer = csv.writer(output, delimiter=',')
csv_file.seek(0)
for row in csv_reader:
if i == 0 or i in rand_ints:
output_writer.writerow(row)
i += 1
| none | 1 | 3.005735 | 3 |
|
H3/bundle/q1-starter.py | Cauchemare/CS224W_2020_Solutions | 1 | 6633039 | import snap
import numpy as np
import matplotlib.pyplot as plt
def load_graph(name):
'''
Helper function to load graphs.
Use "epinions" for Epinions graph and "email" for Email graph.
Check that the respective .txt files are in the same folder as this script;
if not, change the paths below as required.
'''
if name == "epinions":
G = snap.LoadEdgeList(snap.PNGraph, "soc-Epinions1.txt", 0, 1)
elif name == 'email':
G = snap.LoadEdgeList(snap.PNGraph, "email-EuAll.txt", 0, 1)
else:
raise ValueError("Invalid graph: please use 'email' or 'epinions'.")
return G
def q1_1():
'''
You will have to run the inward and outward BFS trees for the
respective nodes and reason about whether they are in SCC, IN or OUT.
You may find the SNAP function GetBfsTree() to be useful here.
'''
##########################################################################
#TODO: Run outward and inward BFS trees from node 2018, compare sizes
#and comment on where node 2018 lies.
G = load_graph("email")
#Your code here:
def q1_1_sub(G,StartNId):
outBfs= G.GetBfsTree(StartNId,True,False)
inBfs = G.GetBfsTree(StartNId,False,True)
return outBfs.GetNodes(),inBfs.GetNodes()
email_result = q1_1_sub( G,2018)
print(G.GetMxSccSz())
print('total:{0},outgoing:{1},incoming:{2}'.format(G.GetNodes(),email_result[0],email_result[1] ))
##########################################################################
##########################################################################
#TODO: Run outward and inward BFS trees from node 224, compare sizes
#and comment on where node 224 lies.
G = load_graph("epinions")
#Your code here:
epinions_result= q1_1_sub( G,224)
print('total:{0},outgoing:{1},incoming:{2}'.format(G.GetNodes(),epinions_result[0],epinions_result[1] ))
print(G.GetMxSccSz())
##########################################################################
print ('2.1: Done!\n')
def q1_2():
'''
For each graph, get 100 random nodes and find the number of nodes in their
inward and outward BFS trees starting from each node. Plot the cumulative
number of nodes reached in the BFS runs, similar to the graph shown in
Broder et al. (see Figure in handout). You will need to have 4 figures,
one each for the inward and outward BFS for each of email and epinions.
Note: You may find the SNAP function GetRndNId() useful to get random
node IDs (for initializing BFS).
'''
##########################################################################
#TODO: See above.
#Your code here:
def q1_2_sub(G,NumIds):
rnd= snap.TRnd()
rnd.Randomize()
outNodesCnt= []
inNodesCnt =[]
for i in range (NumIds):
RndNId = G.GetRndNId(rnd)
BfsOutTree =G.GetBfsTree(RndNId,True,False)
outNodesCnt.append(BfsOutTree.GetNodes() )
BfsInTree =G.GetBfsTree(RndNId,False,True)
inNodesCnt.append( BfsInTree.GetNodes() )
return sorted(outNodesCnt), sorted(inNodesCnt)
def q1_2_plot(GType):
x= np.linspace(0,1,100,endpoint=False)
G = load_graph(GType)
print('total',G.GetNodes())
outNodesCnt,inNodesCnt =q1_2_sub(G,100)
fig,(ax1,ax2)= plt.subplots(1,2)
fig.suptitle(GType)
ax1.set_title('out')
ax1.plot(x,outNodesCnt)
ax2.set_title('in')
ax2.plot(x, inNodesCnt)
q1_2_plot('email')
q1_2_plot('epinions')
##########################################################################
print ('2.2: Done!\n')
def q1_3():
'''
For each graph, determine the size of the following regions:
DISCONNECTED
IN
OUT
SCC
TENDRILS + TUBES
You can use SNAP functions GetMxWcc() and GetMxScc() to get the sizes of
the largest WCC and SCC on each graph.
'''
##########################################################################
#TODO: See above.
#Your code here:
def q1_3_sub(GType):
G = load_graph(GType)
MxScc = G.GetMxScc()
MxWcc= G.GetMxWcc()
num_disconnected= G.GetNodes() - MxWcc.GetNodes()
num_scc= MxScc.GetNodes()
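        # Note: node 20 is assumed to lie in the giant SCC, so the forward BFS
        # below reaches SCC+OUT and the backward BFS reaches SCC+IN.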
BfsTree_in = G.GetBfsTree(20,False,True)
BfsTree_out = G.GetBfsTree(20,True,False)
num_in = BfsTree_in.GetNodes()- num_scc
num_out= BfsTree_out.GetNodes()- num_scc
num_tubes= G.GetNodes() - num_scc-num_in -num_out -num_disconnected
print('DISCONNECTED {0},IN {1},OUT {2},SCC {3},TENDRILS+ TUBES :{4}'
.format( num_disconnected,num_in,num_out,num_scc, num_tubes ) )
q1_3_sub('email')
q1_3_sub('epinions')
##########################################################################
print ('2.3: Done!\n' )
def q1_4():
'''
For each graph, calculate the probability that a path exists between
two nodes chosen uniformly from the overall graph.
You can do this by choosing a large number of pairs of random nodes
and calculating the fraction of these pairs which are connected.
The following SNAP functions may be of help: GetRndNId(), GetShortPath()
'''
##########################################################################
#TODO: See above.
#Your code here:
def q1_4_sub(GType,num_iters= 10000):
rnd_start =snap.TRnd()
rnd_end = snap.TRnd()
rnd_start.Randomize()
rnd_end.Randomize()
G = load_graph(GType)
num_connected = 0
for i in range(num_iters):
SrcNId= G.GetRndNId(rnd_start)
DestNId= G.GetRndNId(rnd_end)
if G.GetShortPath(SrcNId,DestNId,True) != -1:
num_connected +=1
print('connected fraction :{0:.4f}'.format( num_connected/ num_iters ) )
q1_4_sub('email')
q1_4_sub('epinions')
##########################################################################
print ('2.4: Done!\n')
if __name__ == "__main__":
# q1_1()
# q1_2()
# q1_3()
'''
DISCONNECTED 40382,IN 151023,OUT 17900,SCC 34203,TENDRILS+ TUBES :21706
DISCONNECTED 2,IN 24236,OUT 15453,SCC 32223,TENDRILS+ TUBES :3965
'''
q1_4()
# print ("Done with Question 2!\n") | import snap
import numpy as np
import matplotlib.pyplot as plt
def load_graph(name):
'''
Helper function to load graphs.
Use "epinions" for Epinions graph and "email" for Email graph.
Check that the respective .txt files are in the same folder as this script;
if not, change the paths below as required.
'''
if name == "epinions":
G = snap.LoadEdgeList(snap.PNGraph, "soc-Epinions1.txt", 0, 1)
elif name == 'email':
G = snap.LoadEdgeList(snap.PNGraph, "email-EuAll.txt", 0, 1)
else:
raise ValueError("Invalid graph: please use 'email' or 'epinions'.")
return G
def q1_1():
'''
You will have to run the inward and outward BFS trees for the
respective nodes and reason about whether they are in SCC, IN or OUT.
You may find the SNAP function GetBfsTree() to be useful here.
'''
##########################################################################
#TODO: Run outward and inward BFS trees from node 2018, compare sizes
#and comment on where node 2018 lies.
G = load_graph("email")
#Your code here:
def q1_1_sub(G,StartNId):
outBfs= G.GetBfsTree(StartNId,True,False)
inBfs = G.GetBfsTree(StartNId,False,True)
return outBfs.GetNodes(),inBfs.GetNodes()
email_result = q1_1_sub( G,2018)
print(G.GetMxSccSz())
print('total:{0},outgoing:{1},incoming:{2}'.format(G.GetNodes(),email_result[0],email_result[1] ))
##########################################################################
##########################################################################
#TODO: Run outward and inward BFS trees from node 224, compare sizes
#and comment on where node 224 lies.
G = load_graph("epinions")
#Your code here:
epinions_result= q1_1_sub( G,224)
print('total:{0},outgoing:{1},incoming:{2}'.format(G.GetNodes(),epinions_result[0],epinions_result[1] ))
print(G.GetMxSccSz())
##########################################################################
print ('2.1: Done!\n')
def q1_2():
'''
For each graph, get 100 random nodes and find the number of nodes in their
inward and outward BFS trees starting from each node. Plot the cumulative
number of nodes reached in the BFS runs, similar to the graph shown in
Broder et al. (see Figure in handout). You will need to have 4 figures,
one each for the inward and outward BFS for each of email and epinions.
Note: You may find the SNAP function GetRndNId() useful to get random
node IDs (for initializing BFS).
'''
##########################################################################
#TODO: See above.
#Your code here:
def q1_2_sub(G,NumIds):
rnd= snap.TRnd()
rnd.Randomize()
outNodesCnt= []
inNodesCnt =[]
for i in range (NumIds):
RndNId = G.GetRndNId(rnd)
BfsOutTree =G.GetBfsTree(RndNId,True,False)
outNodesCnt.append(BfsOutTree.GetNodes() )
BfsInTree =G.GetBfsTree(RndNId,False,True)
inNodesCnt.append( BfsInTree.GetNodes() )
return sorted(outNodesCnt), sorted(inNodesCnt)
def q1_2_plot(GType):
x= np.linspace(0,1,100,endpoint=False)
G = load_graph(GType)
print('total',G.GetNodes())
outNodesCnt,inNodesCnt =q1_2_sub(G,100)
fig,(ax1,ax2)= plt.subplots(1,2)
fig.suptitle(GType)
ax1.set_title('out')
ax1.plot(x,outNodesCnt)
ax2.set_title('in')
ax2.plot(x, inNodesCnt)
q1_2_plot('email')
q1_2_plot('epinions')
##########################################################################
print ('2.2: Done!\n')
def q1_3():
'''
For each graph, determine the size of the following regions:
DISCONNECTED
IN
OUT
SCC
TENDRILS + TUBES
You can use SNAP functions GetMxWcc() and GetMxScc() to get the sizes of
the largest WCC and SCC on each graph.
'''
##########################################################################
#TODO: See above.
#Your code here:
def q1_3_sub(GType):
G = load_graph(GType)
MxScc = G.GetMxScc()
MxWcc= G.GetMxWcc()
num_disconnected= G.GetNodes() - MxWcc.GetNodes()
num_scc= MxScc.GetNodes()
BfsTree_in = G.GetBfsTree(20,False,True)
BfsTree_out = G.GetBfsTree(20,True,False)
num_in = BfsTree_in.GetNodes()- num_scc
num_out= BfsTree_out.GetNodes()- num_scc
num_tubes= G.GetNodes() - num_scc-num_in -num_out -num_disconnected
print('DISCONNECTED {0},IN {1},OUT {2},SCC {3},TENDRILS+ TUBES :{4}'
.format( num_disconnected,num_in,num_out,num_scc, num_tubes ) )
q1_3_sub('email')
q1_3_sub('epinions')
##########################################################################
print ('2.3: Done!\n' )
def q1_4():
'''
For each graph, calculate the probability that a path exists between
two nodes chosen uniformly from the overall graph.
You can do this by choosing a large number of pairs of random nodes
and calculating the fraction of these pairs which are connected.
The following SNAP functions may be of help: GetRndNId(), GetShortPath()
'''
##########################################################################
#TODO: See above.
#Your code here:
def q1_4_sub(GType,num_iters= 10000):
rnd_start =snap.TRnd()
rnd_end = snap.TRnd()
rnd_start.Randomize()
rnd_end.Randomize()
G = load_graph(GType)
num_connected = 0
for i in range(num_iters):
SrcNId= G.GetRndNId(rnd_start)
DestNId= G.GetRndNId(rnd_end)
if G.GetShortPath(SrcNId,DestNId,True) != -1:
num_connected +=1
print('connected fraction :{0:.4f}'.format( num_connected/ num_iters ) )
q1_4_sub('email')
q1_4_sub('epinions')
##########################################################################
print ('2.4: Done!\n')
if __name__ == "__main__":
# q1_1()
# q1_2()
# q1_3()
'''
DISCONNECTED 40382,IN 151023,OUT 17900,SCC 34203,TENDRILS+ TUBES :21706
DISCONNECTED 2,IN 24236,OUT 15453,SCC 32223,TENDRILS+ TUBES :3965
'''
q1_4()
# print ("Done with Question 2!\n") | en | 0.421461 | Helper function to load graphs. Use "epinions" for Epinions graph and "email" for Email graph. Check that the respective .txt files are in the same folder as this script; if not, change the paths below as required. You will have to run the inward and outward BFS trees for the respective nodes and reason about whether they are in SCC, IN or OUT. You may find the SNAP function GetBfsTree() to be useful here. ########################################################################## #TODO: Run outward and inward BFS trees from node 2018, compare sizes #and comment on where node 2018 lies. #Your code here: ########################################################################## ########################################################################## #TODO: Run outward and inward BFS trees from node 224, compare sizes #and comment on where node 224 lies. #Your code here: ########################################################################## For each graph, get 100 random nodes and find the number of nodes in their inward and outward BFS trees starting from each node. Plot the cumulative number of nodes reached in the BFS runs, similar to the graph shown in Broder et al. (see Figure in handout). You will need to have 4 figures, one each for the inward and outward BFS for each of email and epinions. Note: You may find the SNAP function GetRndNId() useful to get random node IDs (for initializing BFS). ########################################################################## #TODO: See above. #Your code here: ########################################################################## For each graph, determine the size of the following regions: DISCONNECTED IN OUT SCC TENDRILS + TUBES You can use SNAP functions GetMxWcc() and GetMxScc() to get the sizes of the largest WCC and SCC on each graph. ########################################################################## #TODO: See above. #Your code here: ########################################################################## For each graph, calculate the probability that a path exists between two nodes chosen uniformly from the overall graph. You can do this by choosing a large number of pairs of random nodes and calculating the fraction of these pairs which are connected. The following SNAP functions may be of help: GetRndNId(), GetShortPath() ########################################################################## #TODO: See above. #Your code here: ########################################################################## # q1_1() # q1_2() # q1_3() DISCONNECTED 40382,IN 151023,OUT 17900,SCC 34203,TENDRILS+ TUBES :21706 DISCONNECTED 2,IN 24236,OUT 15453,SCC 32223,TENDRILS+ TUBES :3965 # print ("Done with Question 2!\n") | 3.048051 | 3 |
cifar/step2/main_finetune_model_decomposed.py | chatzikon/DNN-COMPRESSION | 9 | 6633040 | from __future__ import print_function
import argparse
import numpy as np
import os
import shutil
import torchnet as tnt
import time
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR
import random
import torch.nn as nn
import sys
sys.path.insert(0, "../step1/cifar100/")
from data_loader_100 import get_train_valid_loader, get_test_loader
sys.path.insert(0, "../step1/cifar10/")
from data_loader import get_train_valid_loader, get_test_loader
from models import *
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR training')
parser.add_argument('--dataset', type=str, default='cifar10',
help='training dataset (default: cifar10)')
parser.add_argument('--refine', default='./decomposed_models/models_finetuned/resnet56_cifar10/tucker2/1.71x/layer_groups:3/t.pth.tar', type=str, metavar='PATH',
help='path to the pruned model to be fine tuned')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
                    help='input batch size for testing (default: 64)')
parser.add_argument('--epochs', type=int, default=140, metavar='N',
                    help='number of epochs to train (default: 140)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                    help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=0, type=float,
                    metavar='W', help='weight decay (default: 0)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save', default='./logs2', type=str, metavar='PATH',
help='path to save prune model (default: current directory)')
parser.add_argument('--arch', default='resnet', type=str,
help='architecture to use')
parser.add_argument('--depth', default=16, type=int,
help='depth of the neural network')
def seed_everything(SEED):
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED']=str(SEED)
def train(model,optimizer,train_loader,epoch):
model.train()
avg_loss = tnt.meter.AverageValueMeter()
train_acc = 0.
for batch_idx, (data, target,index) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
avg_loss.add(loss.item())
pred = output.data.max(1, keepdim=True)[1]
train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()
loss.backward()
optimizer.step()
log_interval=100
if (batch_idx+1) % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
epoch, (batch_idx+1) * len(data), len(train_loader.sampler),
100. * (batch_idx*len(target)) / len(train_loader.sampler), loss.item(), train_acc, (batch_idx+1) * len(data),
100. * float(train_acc) / ((batch_idx+1) * len(data))))
def test(model,test_loader):
model.eval()
test_loss = tnt.meter.AverageValueMeter()
correct = 0
for data, target, index in test_loader:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
loss = F.cross_entropy(output, target)
test_loss.add(loss.item()) # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss.value()[0], correct, len(test_loader.sampler),
100. * float(correct) / len(test_loader.sampler)))
return float(correct) / float(len(test_loader.sampler)), loss.item()
def save_checkpoint(state, is_best,counter, filepath):
torch.save(state, os.path.join(filepath, 'checkpointB.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filepath, 'checkpointB.pth.tar'), os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(state['best_prec1'])+'.pth.tar'))
def load_checkpoint(best,counter,filepath):
if os.path.isfile(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar')):
print("=> loading checkpoint '{}'".format(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar')))
checkpoint = torch.load(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar'))
print("=> loaded checkpoint '{}' Prec1: {:f}".format(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar'), best))
else:
print("=> no checkpoint found at '{}'".format(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar')))
return checkpoint
def main():
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
seed_everything(args.seed)
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.dataset == 'cifar10':
train_loader, valid_loader =get_train_valid_loader('../step1/cifar10/cifar10',
args.batch_size,
augment=True,
random_seed=args.seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=True)
test_loader = get_test_loader('../step1/cifar10/cifar10',
args.batch_size,
shuffle=False,
num_workers=4,
pin_memory=True)
elif args.dataset == 'cifar100':
train_loader, valid_loader =get_train_valid_loader('../cifar100/cifar100',
args.batch_size,
augment=True,
random_seed=args.seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=True)
test_loader = get_test_loader('../cifar100/cifar100',
args.batch_size,
shuffle=False,
num_workers=4,
pin_memory=True)
#load the compressed network
model=torch.load(args.refine)
    # Sometimes there is a problem with AvgPool2d in the loaded model; if it occurs, uncomment the line below.
#model.avgpool = nn.AvgPool2d(kernel_size=8, stride=8, padding=0)
model.cuda()
optimizer = optim.SGD(model.parameters(), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=[80,120], gamma=0.1)
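    # The milestones/gamma above mean the learning rate is divided by 10 at
    # epochs 80 and 120 of the fine-tuning run.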
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
epoch = checkpoint['epoch']
print(epoch)
print("=> loaded checkpoint '{}' Prec1: {:f}"
.format(args.resume, best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
best_prec1 = 0.
for epoch in range(args.start_epoch, args.epochs):
start_time = time.time()
train(model, optimizer, train_loader, epoch)
scheduler.step(epoch)
print('learning rate')
print(optimizer.param_groups[0]['lr'])
prec1,_ = test(model,valid_loader)
prec1=float(prec1)
print(prec1)
print(best_prec1)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
print(is_best)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best, args.seed, filepath=args.save)
elapsed_time = time.time() - start_time
print(elapsed_time)
checkpoint_t = load_checkpoint(best_prec1, args.seed, args.save)
model.load_state_dict(checkpoint_t['state_dict'])
prec_f, _ = test(model, test_loader)
prec_f = float(prec_f)
best_prec1 = prec_f
is_best = True
save_checkpoint({
'epoch': args.epochs + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best, args.seed, filepath=args.save)
if __name__ == '__main__':
main()
| from __future__ import print_function
import argparse
import numpy as np
import os
import shutil
import torchnet as tnt
import time
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR
import random
import torch.nn as nn
import sys
sys.path.insert(0, "../step1/cifar100/")
from data_loader_100 import get_train_valid_loader, get_test_loader
sys.path.insert(0, "../step1/cifar10/")
from data_loader import get_train_valid_loader, get_test_loader
from models import *
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR training')
parser.add_argument('--dataset', type=str, default='cifar10',
help='training dataset (default: cifar10)')
parser.add_argument('--refine', default='./decomposed_models/models_finetuned/resnet56_cifar10/tucker2/1.71x/layer_groups:3/t.pth.tar', type=str, metavar='PATH',
help='path to the pruned model to be fine tuned')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
                    help='input batch size for testing (default: 64)')
parser.add_argument('--epochs', type=int, default=140, metavar='N',
                    help='number of epochs to train (default: 140)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                    help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=0, type=float,
                    metavar='W', help='weight decay (default: 0)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save', default='./logs2', type=str, metavar='PATH',
help='path to save prune model (default: current directory)')
parser.add_argument('--arch', default='resnet', type=str,
help='architecture to use')
parser.add_argument('--depth', default=16, type=int,
help='depth of the neural network')
def seed_everything(SEED):
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED']=str(SEED)
def train(model,optimizer,train_loader,epoch):
model.train()
avg_loss = tnt.meter.AverageValueMeter()
train_acc = 0.
for batch_idx, (data, target,index) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
avg_loss.add(loss.item())
pred = output.data.max(1, keepdim=True)[1]
train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()
loss.backward()
optimizer.step()
log_interval=100
if (batch_idx+1) % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
epoch, (batch_idx+1) * len(data), len(train_loader.sampler),
100. * (batch_idx*len(target)) / len(train_loader.sampler), loss.item(), train_acc, (batch_idx+1) * len(data),
100. * float(train_acc) / ((batch_idx+1) * len(data))))
def test(model,test_loader):
model.eval()
test_loss = tnt.meter.AverageValueMeter()
correct = 0
for data, target, index in test_loader:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
loss = F.cross_entropy(output, target)
test_loss.add(loss.item()) # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss.value()[0], correct, len(test_loader.sampler),
100. * float(correct) / len(test_loader.sampler)))
return float(correct) / float(len(test_loader.sampler)), loss.item()
def save_checkpoint(state, is_best,counter, filepath):
torch.save(state, os.path.join(filepath, 'checkpointB.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filepath, 'checkpointB.pth.tar'), os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(state['best_prec1'])+'.pth.tar'))
def load_checkpoint(best,counter,filepath):
if os.path.isfile(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar')):
print("=> loading checkpoint '{}'".format(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar')))
checkpoint = torch.load(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar'))
print("=> loaded checkpoint '{}' Prec1: {:f}".format(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar'), best))
else:
print("=> no checkpoint found at '{}'".format(os.path.join(filepath, 'modelB_best_test_acc_'+str(counter)+'_'+str(best)+'.pth.tar')))
return checkpoint
def main():
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
seed_everything(args.seed)
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.dataset == 'cifar10':
train_loader, valid_loader =get_train_valid_loader('../step1/cifar10/cifar10',
args.batch_size,
augment=True,
random_seed=args.seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=True)
test_loader = get_test_loader('../step1/cifar10/cifar10',
args.batch_size,
shuffle=False,
num_workers=4,
pin_memory=True)
elif args.dataset == 'cifar100':
train_loader, valid_loader =get_train_valid_loader('../cifar100/cifar100',
args.batch_size,
augment=True,
random_seed=args.seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=True)
test_loader = get_test_loader('../cifar100/cifar100',
args.batch_size,
shuffle=False,
num_workers=4,
pin_memory=True)
#load the compressed network
model=torch.load(args.refine)
    # Sometimes there is a problem with AvgPool2d in the loaded model; if it occurs, uncomment the line below.
#model.avgpool = nn.AvgPool2d(kernel_size=8, stride=8, padding=0)
model.cuda()
optimizer = optim.SGD(model.parameters(), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=[80,120], gamma=0.1)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
epoch = checkpoint['epoch']
print(epoch)
print("=> loaded checkpoint '{}' Prec1: {:f}"
.format(args.resume, best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
best_prec1 = 0.
for epoch in range(args.start_epoch, args.epochs):
start_time = time.time()
train(model, optimizer, train_loader, epoch)
scheduler.step(epoch)
print('learning rate')
print(optimizer.param_groups[0]['lr'])
prec1,_ = test(model,valid_loader)
prec1=float(prec1)
print(prec1)
print(best_prec1)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
print(is_best)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best, args.seed, filepath=args.save)
elapsed_time = time.time() - start_time
print(elapsed_time)
checkpoint_t = load_checkpoint(best_prec1, args.seed, args.save)
model.load_state_dict(checkpoint_t['state_dict'])
prec_f, _ = test(model, test_loader)
prec_f = float(prec_f)
best_prec1 = prec_f
is_best = True
save_checkpoint({
'epoch': args.epochs + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best, args.seed, filepath=args.save)
if __name__ == '__main__':
main()
| en | 0.798905 | # Training settings # sum up batch loss # get the index of the max log-probability #load the compressed network #sometimes there is a problem with AvgPool2d of the loaded model, if this problem occur, uncomment this line #model.avgpool = nn.AvgPool2d(kernel_size=8, stride=8, padding=0) | 2.239001 | 2 |
src/programy/extensions/admin/client.py | whackur/chatbot | 2 | 6633041 | """
Copyright (c) 2016-2018 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.extensions.base import Extension
class ClientAdminExtension(Extension):
# execute() is the interface that is called from the <extension> tag in the AIML
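    # A hedged illustration (not from the original project; attribute names can
    # differ between ProgramY versions): an AIML category might call into this
    # extension roughly like
    #
    #   <category>
    #     <pattern>LIST BOTS</pattern>
    #     <template>
    #       <extension path="programy.extensions.admin.client.ClientAdminExtension">
    #         LIST BOTS
    #       </extension>
    #     </template>
    #   </category>
    #
    # where the text inside <extension> is what arrives here as `data`.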
def execute(self, client_context, data):
YLogger.debug(client_context, "Client Admin - [%s]", data)
try:
commands = data.split()
if commands[0] == 'COMMANDS':
return "LIST BOTS, LIST BRAINS, DUMP BRAIN"
elif commands[0] == 'LIST':
if commands[1] == 'BOTS':
ids = client_context.client.bot_factory.botids()
return ", ".join(ids)
elif commands[1] == 'BRAINS':
botid = commands[2]
bot = client_context.client.bot_factory.bot(botid)
if bot:
ids = bot.brain_factory.brainids()
return ", ".join(ids)
else:
return "No client information available"
elif commands[0] == 'DUMP':
if commands[1] == 'BRAIN':
botid = commands[2]
bot = client_context.client.bot_factory.bot(botid)
if bot is not None:
brainid = commands[3]
brain = bot.brain_factory.brain(brainid)
if brain is not None:
brain.dump_brain_tree()
return "Brain dumped, see config for location"
except Exception as e:
YLogger.exception(client_context, "Failed to execute client admin extension", e)
return "Client Admin Error" | """
Copyright (c) 2016-2018 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.extensions.base import Extension
class ClientAdminExtension(Extension):
# execute() is the interface that is called from the <extension> tag in the AIML
def execute(self, client_context, data):
YLogger.debug(client_context, "Client Admin - [%s]", data)
try:
commands = data.split()
if commands[0] == 'COMMANDS':
return "LIST BOTS, LIST BRAINS, DUMP BRAIN"
elif commands[0] == 'LIST':
if commands[1] == 'BOTS':
ids = client_context.client.bot_factory.botids()
return ", ".join(ids)
elif commands[1] == 'BRAINS':
botid = commands[2]
bot = client_context.client.bot_factory.bot(botid)
if bot:
ids = bot.brain_factory.brainids()
return ", ".join(ids)
else:
return "No client information available"
elif commands[0] == 'DUMP':
if commands[1] == 'BRAIN':
botid = commands[2]
bot = client_context.client.bot_factory.bot(botid)
if bot is not None:
brainid = commands[3]
brain = bot.brain_factory.brain(brainid)
if brain is not None:
brain.dump_brain_tree()
return "Brain dumped, see config for location"
except Exception as e:
YLogger.exception(client_context, "Failed to execute client admin extension", e)
return "Client Admin Error" | en | 0.783776 | Copyright (c) 2016-2018 <NAME> http://www.keithsterling.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # execute() is the interface that is called from the <extension> tag in the AIML | 1.966958 | 2 |
src/BlenderClass/importSTL.py | kazulagi/plantFEM | 21 | 6633042 | import bpy
bl_info = {
    "name" : "import_STL_object", # Plugin name
    "author" : "<NAME>", # Author
    "version" : (0,1), # Plugin version
    "blender" : (2, 7, 9), # Blender version the plugin runs on
    "location" : "3DVIEW > ADD > MESH ", # Where the plugin is placed inside Blender
    "description" : "importing STL object", # Plugin description
    "warning" : "",
    "wiki_url" : "https://github.com/kazulagi/plantFEM", # URL of the wiki page with the plugin's documentation
    "tracker_url" : "", # Thread URL on Blender Developer Org
    "support": "TESTING",
    "category" : "Object" # Plugin category name
}
## Operator that generates an object (ICO sphere)
#class importSTL(bpy.types.Operator):
#
# bl_idname = "object.create_object_STL"
# bl_label = "import_STL_obj"
# bl_description = "import STL"
# bl_options = {'REGISTER', 'UNDO'}
#
#    # Function called when the menu item is executed
# def execute(self, context):
# #bpy.ops.import_mesh.stl()
# bpy.ops.mesh.primitive_ico_sphere_add()
# print("Imported STLs")
# return{'FINISHED'}
# Operator that generates an object (ICO sphere)
class CreateObjectSTL(bpy.types.Operator):
bl_idname = "object.create_object5"
bl_label = "objSTL"
bl_description = "creating objSTL"
bl_options = {'REGISTER', 'UNDO'}
    # Function called when the menu item is executed
def execute(self, context):
bpy.ops.mesh.primitive_ico_sphere_add()
bpy.ops.export_mesh.stl(filepath="/home/haruka/test/sphere.stl")
print("created STLs")
return {'FINISHED'}
# Function that builds the menu entry
def menu_fn(self, context):
self.layout.separator()
self.layout.operator(CreateObjectSTL.bl_idname)
# Processing when the add-on is enabled
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_mesh_add.append(menu_fn)
print("Addon importSTL is activated [ok]")
# Processing when the add-on is disabled
def unregister():
bpy.types.INFO_MT_mesh_add.remove(menu_fn)
bpy.utils.unregister_module(__name__)
print("Addon importSTL is inactivated [ok]")
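# Usage note (added; an assumption about the intended workflow): once the
# add-on is enabled in Blender's preferences, the operator registered above
# appears under Add > Mesh as "objSTL", because menu_fn is appended to
# bpy.types.INFO_MT_mesh_add.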
# Main entry point
if __name__ == "__main__":
register() | import bpy
bl_info = {
    "name" : "import_STL_object", # Plugin name
    "author" : "<NAME>", # Author
    "version" : (0,1), # Plugin version
    "blender" : (2, 7, 9), # Blender version the plugin runs on
    "location" : "3DVIEW > ADD > MESH ", # Where the plugin is placed inside Blender
    "description" : "importing STL object", # Plugin description
    "warning" : "",
    "wiki_url" : "https://github.com/kazulagi/plantFEM", # URL of the wiki page with the plugin's documentation
    "tracker_url" : "", # Thread URL on Blender Developer Org
    "support": "TESTING",
    "category" : "Object" # Plugin category name
}
## Operator that generates an object (ICO sphere)
#class importSTL(bpy.types.Operator):
#
# bl_idname = "object.create_object_STL"
# bl_label = "import_STL_obj"
# bl_description = "import STL"
# bl_options = {'REGISTER', 'UNDO'}
#
#    # Function called when the menu item is executed
# def execute(self, context):
# #bpy.ops.import_mesh.stl()
# bpy.ops.mesh.primitive_ico_sphere_add()
# print("Imported STLs")
# return{'FINISHED'}
# Operator that generates an object (ICO sphere)
class CreateObjectSTL(bpy.types.Operator):
bl_idname = "object.create_object5"
bl_label = "objSTL"
bl_description = "creating objSTL"
bl_options = {'REGISTER', 'UNDO'}
    # Function called when the menu item is executed
def execute(self, context):
bpy.ops.mesh.primitive_ico_sphere_add()
bpy.ops.export_mesh.stl(filepath="/home/haruka/test/sphere.stl")
print("created STLs")
return {'FINISHED'}
# Function that builds the menu entry
def menu_fn(self, context):
self.layout.separator()
self.layout.operator(CreateObjectSTL.bl_idname)
# Processing when the add-on is enabled
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_mesh_add.append(menu_fn)
print("Addon importSTL is activated [ok]")
# Processing when the add-on is disabled
def unregister():
bpy.types.INFO_MT_mesh_add.remove(menu_fn)
bpy.utils.unregister_module(__name__)
print("Addon importSTL is inactivated [ok]")
# Main entry point
if __name__ == "__main__":
register() | ja | 0.981788 | # プラグイン名 # 作者 # プラグインのバージョン # プラグインが動作するBlenderのバージョン # Blender内部でのプラグインの位置づけ # プラグインの説明 # プラグインの説明が存在するWikiページのURL # Blender Developer OrgのスレッドURL # プラグインのカテゴリ名 ## オブジェクト(ICO球)を生成するオペレーション #class importSTL(bpy.types.Operator): # # bl_idname = "object.create_object_STL" # bl_label = "import_STL_obj" # bl_description = "import STL" # bl_options = {'REGISTER', 'UNDO'} # # # メニューを実行した時に呼ばれる関数 # def execute(self, context): # #bpy.ops.import_mesh.stl() # bpy.ops.mesh.primitive_ico_sphere_add() # print("Imported STLs") # return{'FINISHED'} # オブジェクト(ICO球)を生成するオペレーション # メニューを実行した時に呼ばれる関数 # メニューを構築する関数 # アドオン有効化時の処理 # アドオン無効化時の処理 # メイン処理 | 2.191201 | 2 |
desktop/core/ext-py/Django-1.2.3/django/middleware/csrf.py | digideskio/hortonworks-sandbox | 19 | 6633043 | <reponame>digideskio/hortonworks-sandbox<filename>desktop/core/ext-py/Django-1.2.3/django/middleware/csrf.py
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
import itertools
import re
import random
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.hashcompat import md5_constructor
from django.utils.safestring import mark_safe
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
_MAX_CSRF_KEY = 18446744073709551616L # 2 << 63
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_COOKIE = "No CSRF or session cookie."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return md5_constructor("%s%s"
% (randrange(0, _MAX_CSRF_KEY), settings.SECRET_KEY)).hexdigest()
def _make_legacy_session_token(session_id):
return md5_constructor(settings.SECRET_KEY + session_id).hexdigest()
def get_token(request):
"""
    Returns the CSRF token required for a POST form. The token is an
alphanumeric value.
    A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
request.META["CSRF_COOKIE_USED"] = True
return request.META.get("CSRF_COOKIE", None)
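# Illustrative note (added here, not part of the upstream module): the token
# from get_token() normally reaches templates through the csrf context
# processor, and a POST form emits it with the csrf_token template tag, e.g.
#
#   <form action="." method="post">{% csrf_token %} ... </form>
#
# which renders the hidden 'csrfmiddlewaretoken' input that
# CsrfViewMiddleware below checks.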
def _sanitize_token(token):
# Allow only alphanum, and ensure we return a 'str' for the sake of the post
# processing middleware.
token = re.sub('[^a-zA-Z0-9]', '', str(token.decode('ascii', 'ignore')))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
else:
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
reject = lambda s: _get_failure_view()(request, reason=s)
def accept():
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
# If the user doesn't have a CSRF cookie, generate one and store it in the
# request, so it's available to the view. We'll store it in a cookie when
# we reach the response.
try:
# In case of cookies from untrusted sources, we strip anything
# dangerous at this point, so that the cookie + token will have the
# same, sanitized value.
request.META["CSRF_COOKIE"] = _sanitize_token(request.COOKIES[settings.CSRF_COOKIE_NAME])
cookie_is_new = False
except KeyError:
# No cookie, so create one. This will be sent with the next
# response.
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
# Set a flag to allow us to fall back and allow the session id in
# place of a CSRF cookie for this request only.
cookie_is_new = True
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
if request.method == 'POST':
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite. It comes after
# the creation of CSRF cookies, so that everything else continues to
# work exactly the same (e.g. cookies are sent etc), but before the
# any branches that call reject()
return accept()
if request.is_ajax():
# .is_ajax() is based on the presence of X-Requested-With. In
# the context of a browser, this can only be sent if using
# XmlHttpRequest. Browsers implement careful policies for
# XmlHttpRequest:
#
# * Normally, only same-domain requests are allowed.
#
# * Some browsers (e.g. Firefox 3.5 and later) relax this
# carefully:
#
# * if it is a 'simple' GET or POST request (which can
# include no custom headers), it is allowed to be cross
# domain. These requests will not be recognized as AJAX.
#
# * if a 'preflight' check with the server confirms that the
# server is expecting and allows the request, cross domain
# requests even with custom headers are allowed. These
# requests will be recognized as AJAX, but can only get
# through when the developer has specifically opted in to
# allowing the cross-domain POST request.
#
# So in all cases, it is safe to allow these requests through.
return accept()
if request.is_secure():
# Strict referer checking for HTTPS
referer = request.META.get('HTTP_REFERER')
if referer is None:
return reject(REASON_NO_REFERER)
# The following check ensures that the referer is HTTPS,
# the domains match and the ports match. This might be too strict.
good_referer = 'https://%s/' % request.get_host()
if not referer.startswith(good_referer):
return reject(REASON_BAD_REFERER %
(referer, good_referer))
# If the user didn't already have a CSRF cookie, then fall back to
# the Django 1.1 method (hash of session ID), so a request is not
# rejected if the form was sent to the user before upgrading to the
# Django 1.2 method (session independent nonce)
if cookie_is_new:
try:
session_id = request.COOKIES[settings.SESSION_COOKIE_NAME]
csrf_token = _make_legacy_session_token(session_id)
except KeyError:
# No CSRF cookie and no session cookie. For POST requests,
# we insist on a CSRF cookie, and in this way we can avoid
# all CSRF attacks, including login CSRF.
return reject(REASON_NO_COOKIE)
else:
csrf_token = request.META["CSRF_COOKIE"]
# check incoming token
request_csrf_token = request.POST.get('csrfmiddlewaretoken', None)
if request_csrf_token != csrf_token:
if cookie_is_new:
# probably a problem setting the CSRF cookie
return reject(REASON_NO_CSRF_COOKIE)
else:
return reject(REASON_BAD_TOKEN)
return accept()
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
# If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
        # never called, probably because a request middleware returned a response
# (for example, contrib.auth redirecting to a login page).
if request.META.get("CSRF_COOKIE") is None:
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"], max_age = 60 * 60 * 24 * 7 * 52,
domain=settings.CSRF_COOKIE_DOMAIN)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
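# Deployment sketch (an illustration added for clarity, not upstream code):
# the view middleware is enabled from settings, typically along the lines of
#
#   MIDDLEWARE_CLASSES = (
#       'django.middleware.common.CommonMiddleware',
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'django.middleware.csrf.CsrfViewMiddleware',
#       # ...
#   )
#
# so that process_view/process_response above run for every request.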
class CsrfResponseMiddleware(object):
"""
DEPRECATED
Middleware that post-processes a response to add a csrfmiddlewaretoken.
This exists for backwards compatibility and as an interim measure until
    applications are converted to using the csrf_token template tag
instead. It will be removed in Django 1.4.
"""
def __init__(self):
import warnings
warnings.warn(
"CsrfResponseMiddleware and CsrfMiddleware are deprecated; use CsrfViewMiddleware and the template tag instead (see CSRF documentation).",
PendingDeprecationWarning
)
def process_response(self, request, response):
if getattr(response, 'csrf_exempt', False):
return response
if response['Content-Type'].split(';')[0] in _HTML_TYPES:
csrf_token = get_token(request)
# If csrf_token is None, we have no token for this request, which probably
# means that this is a response from a request middleware.
if csrf_token is None:
return response
# ensure we don't add the 'id' attribute twice (HTML validity)
idattributes = itertools.chain(("id='csrfmiddlewaretoken'",),
itertools.repeat(''))
def add_csrf_field(match):
"""Returns the matched <form> tag plus the added <input> element"""
return mark_safe(match.group() + "<div style='display:none;'>" + \
"<input type='hidden' " + idattributes.next() + \
" name='csrfmiddlewaretoken' value='" + csrf_token + \
"' /></div>")
# Modify any POST forms
response.content, n = _POST_FORM_RE.subn(add_csrf_field, response.content)
if n > 0:
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
# Since the content has been modified, any Etag will now be
# incorrect. We could recalculate, but only if we assume that
# the Etag was set by CommonMiddleware. The safest thing is just
# to delete. See bug #9163
del response['ETag']
return response
class CsrfMiddleware(object):
"""
Django middleware that adds protection against Cross Site
Request Forgeries by adding hidden form fields to POST forms and
checking requests for the correct value.
CsrfMiddleware uses two middleware, CsrfViewMiddleware and
CsrfResponseMiddleware, which can be used independently. It is recommended
to use only CsrfViewMiddleware and use the csrf_token template tag in
templates for inserting the token.
"""
# We can't just inherit from CsrfViewMiddleware and CsrfResponseMiddleware
# because both have process_response methods.
def __init__(self):
self.response_middleware = CsrfResponseMiddleware()
self.view_middleware = CsrfViewMiddleware()
def process_response(self, request, resp):
# We must do the response post-processing first, because that calls
# get_token(), which triggers a flag saying that the CSRF cookie needs
# to be sent (done in CsrfViewMiddleware.process_response)
resp2 = self.response_middleware.process_response(request, resp)
return self.view_middleware.process_response(request, resp2)
def process_view(self, request, callback, callback_args, callback_kwargs):
return self.view_middleware.process_view(request, callback, callback_args,
callback_kwargs)
| """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
import itertools
import re
import random
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.hashcompat import md5_constructor
from django.utils.safestring import mark_safe
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
_MAX_CSRF_KEY = 18446744073709551616L # 2 << 63
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_COOKIE = "No CSRF or session cookie."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return md5_constructor("%s%s"
% (randrange(0, _MAX_CSRF_KEY), settings.SECRET_KEY)).hexdigest()
def _make_legacy_session_token(session_id):
return md5_constructor(settings.SECRET_KEY + session_id).hexdigest()
def get_token(request):
"""
    Returns the CSRF token required for a POST form. The token is an
alphanumeric value.
    A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
request.META["CSRF_COOKIE_USED"] = True
return request.META.get("CSRF_COOKIE", None)
def _sanitize_token(token):
# Allow only alphanum, and ensure we return a 'str' for the sake of the post
# processing middleware.
token = re.sub('[^a-zA-Z0-9]', '', str(token.decode('ascii', 'ignore')))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
else:
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
reject = lambda s: _get_failure_view()(request, reason=s)
def accept():
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
# If the user doesn't have a CSRF cookie, generate one and store it in the
# request, so it's available to the view. We'll store it in a cookie when
# we reach the response.
try:
# In case of cookies from untrusted sources, we strip anything
# dangerous at this point, so that the cookie + token will have the
# same, sanitized value.
request.META["CSRF_COOKIE"] = _sanitize_token(request.COOKIES[settings.CSRF_COOKIE_NAME])
cookie_is_new = False
except KeyError:
# No cookie, so create one. This will be sent with the next
# response.
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
# Set a flag to allow us to fall back and allow the session id in
# place of a CSRF cookie for this request only.
cookie_is_new = True
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
if request.method == 'POST':
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite. It comes after
# the creation of CSRF cookies, so that everything else continues to
# work exactly the same (e.g. cookies are sent etc), but before the
# any branches that call reject()
return accept()
if request.is_ajax():
# .is_ajax() is based on the presence of X-Requested-With. In
# the context of a browser, this can only be sent if using
# XmlHttpRequest. Browsers implement careful policies for
# XmlHttpRequest:
#
# * Normally, only same-domain requests are allowed.
#
# * Some browsers (e.g. Firefox 3.5 and later) relax this
# carefully:
#
# * if it is a 'simple' GET or POST request (which can
# include no custom headers), it is allowed to be cross
# domain. These requests will not be recognized as AJAX.
#
# * if a 'preflight' check with the server confirms that the
# server is expecting and allows the request, cross domain
# requests even with custom headers are allowed. These
# requests will be recognized as AJAX, but can only get
# through when the developer has specifically opted in to
# allowing the cross-domain POST request.
#
# So in all cases, it is safe to allow these requests through.
return accept()
if request.is_secure():
# Strict referer checking for HTTPS
referer = request.META.get('HTTP_REFERER')
if referer is None:
return reject(REASON_NO_REFERER)
# The following check ensures that the referer is HTTPS,
# the domains match and the ports match. This might be too strict.
good_referer = 'https://%s/' % request.get_host()
if not referer.startswith(good_referer):
return reject(REASON_BAD_REFERER %
(referer, good_referer))
# If the user didn't already have a CSRF cookie, then fall back to
# the Django 1.1 method (hash of session ID), so a request is not
# rejected if the form was sent to the user before upgrading to the
# Django 1.2 method (session independent nonce)
if cookie_is_new:
try:
session_id = request.COOKIES[settings.SESSION_COOKIE_NAME]
csrf_token = _make_legacy_session_token(session_id)
except KeyError:
# No CSRF cookie and no session cookie. For POST requests,
# we insist on a CSRF cookie, and in this way we can avoid
# all CSRF attacks, including login CSRF.
return reject(REASON_NO_COOKIE)
else:
csrf_token = request.META["CSRF_COOKIE"]
# check incoming token
request_csrf_token = request.POST.get('csrfmiddlewaretoken', None)
if request_csrf_token != csrf_token:
if cookie_is_new:
# probably a problem setting the CSRF cookie
return reject(REASON_NO_CSRF_COOKIE)
else:
return reject(REASON_BAD_TOKEN)
return accept()
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
# If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
# never called, probaby because a request middleware returned a response
# (for example, contrib.auth redirecting to a login page).
if request.META.get("CSRF_COOKIE") is None:
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"], max_age = 60 * 60 * 24 * 7 * 52,
domain=settings.CSRF_COOKIE_DOMAIN)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
class CsrfResponseMiddleware(object):
"""
DEPRECATED
Middleware that post-processes a response to add a csrfmiddlewaretoken.
This exists for backwards compatibility and as an interim measure until
applications are converted to using use the csrf_token template tag
instead. It will be removed in Django 1.4.
"""
def __init__(self):
import warnings
warnings.warn(
"CsrfResponseMiddleware and CsrfMiddleware are deprecated; use CsrfViewMiddleware and the template tag instead (see CSRF documentation).",
PendingDeprecationWarning
)
def process_response(self, request, response):
if getattr(response, 'csrf_exempt', False):
return response
if response['Content-Type'].split(';')[0] in _HTML_TYPES:
csrf_token = get_token(request)
# If csrf_token is None, we have no token for this request, which probably
# means that this is a response from a request middleware.
if csrf_token is None:
return response
# ensure we don't add the 'id' attribute twice (HTML validity)
idattributes = itertools.chain(("id='csrfmiddlewaretoken'",),
itertools.repeat(''))
def add_csrf_field(match):
"""Returns the matched <form> tag plus the added <input> element"""
return mark_safe(match.group() + "<div style='display:none;'>" + \
"<input type='hidden' " + idattributes.next() + \
" name='csrfmiddlewaretoken' value='" + csrf_token + \
"' /></div>")
# Modify any POST forms
response.content, n = _POST_FORM_RE.subn(add_csrf_field, response.content)
if n > 0:
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
# Since the content has been modified, any Etag will now be
# incorrect. We could recalculate, but only if we assume that
# the Etag was set by CommonMiddleware. The safest thing is just
# to delete. See bug #9163
del response['ETag']
return response
class CsrfMiddleware(object):
"""
Django middleware that adds protection against Cross Site
Request Forgeries by adding hidden form fields to POST forms and
checking requests for the correct value.
CsrfMiddleware uses two middleware, CsrfViewMiddleware and
CsrfResponseMiddleware, which can be used independently. It is recommended
to use only CsrfViewMiddleware and use the csrf_token template tag in
templates for inserting the token.
"""
# We can't just inherit from CsrfViewMiddleware and CsrfResponseMiddleware
# because both have process_response methods.
def __init__(self):
self.response_middleware = CsrfResponseMiddleware()
self.view_middleware = CsrfViewMiddleware()
def process_response(self, request, resp):
# We must do the response post-processing first, because that calls
# get_token(), which triggers a flag saying that the CSRF cookie needs
# to be sent (done in CsrfViewMiddleware.process_response)
resp2 = self.response_middleware.process_response(request, resp)
return self.view_middleware.process_response(request, resp2)
def process_view(self, request, callback, callback_args, callback_kwargs):
return self.view_middleware.process_view(request, callback, callback_args,
callback_kwargs) | en | 0.883026 | Cross Site Request Forgery Middleware. This module provides a middleware that implements protection against request forgeries from other sites. # Use the system (hardware-based) random number generator if it exists. # 2 << 63 Returns the view to be used for CSRF rejections Returns the the CSRF token required for a POST form. The token is an alphanumeric value. A side effect of calling this function is to make the the csrf_protect decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie' header to the outgoing response. For this reason, you may need to use this function lazily, as is done by the csrf context processor. # Allow only alphanum, and ensure we return a 'str' for the sake of the post # processing middleware. # In case the cookie has been truncated to nothing at some point. Middleware that requires a present and correct csrfmiddlewaretoken for POST requests that have a CSRF cookie, and sets an outgoing CSRF cookie. This middleware should be used in conjunction with the csrf_token template tag. # Avoid checking the request twice by adding a custom attribute to # request. This will be relevant when both decorator and middleware # are used. # If the user doesn't have a CSRF cookie, generate one and store it in the # request, so it's available to the view. We'll store it in a cookie when # we reach the response. # In case of cookies from untrusted sources, we strip anything # dangerous at this point, so that the cookie + token will have the # same, sanitized value. # No cookie, so create one. This will be sent with the next # response. # Set a flag to allow us to fall back and allow the session id in # place of a CSRF cookie for this request only. # Wait until request.META["CSRF_COOKIE"] has been manipulated before # bailing out, so that get_token still works # Mechanism to turn off CSRF checks for test suite. It comes after # the creation of CSRF cookies, so that everything else continues to # work exactly the same (e.g. cookies are sent etc), but before the # any branches that call reject() # .is_ajax() is based on the presence of X-Requested-With. In # the context of a browser, this can only be sent if using # XmlHttpRequest. Browsers implement careful policies for # XmlHttpRequest: # # * Normally, only same-domain requests are allowed. # # * Some browsers (e.g. Firefox 3.5 and later) relax this # carefully: # # * if it is a 'simple' GET or POST request (which can # include no custom headers), it is allowed to be cross # domain. These requests will not be recognized as AJAX. # # * if a 'preflight' check with the server confirms that the # server is expecting and allows the request, cross domain # requests even with custom headers are allowed. These # requests will be recognized as AJAX, but can only get # through when the developer has specifically opted in to # allowing the cross-domain POST request. # # So in all cases, it is safe to allow these requests through. # Strict referer checking for HTTPS # The following check ensures that the referer is HTTPS, # the domains match and the ports match. This might be too strict. # If the user didn't already have a CSRF cookie, then fall back to # the Django 1.1 method (hash of session ID), so a request is not # rejected if the form was sent to the user before upgrading to the # Django 1.2 method (session independent nonce) # No CSRF cookie and no session cookie. For POST requests, # we insist on a CSRF cookie, and in this way we can avoid # all CSRF attacks, including login CSRF. 
# check incoming token # probably a problem setting the CSRF cookie # If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was # never called, probaby because a request middleware returned a response # (for example, contrib.auth redirecting to a login page). # Set the CSRF cookie even if it's already set, so we renew the expiry timer. # Content varies with the CSRF cookie, so set the Vary header. DEPRECATED Middleware that post-processes a response to add a csrfmiddlewaretoken. This exists for backwards compatibility and as an interim measure until applications are converted to using use the csrf_token template tag instead. It will be removed in Django 1.4. # If csrf_token is None, we have no token for this request, which probably # means that this is a response from a request middleware. # ensure we don't add the 'id' attribute twice (HTML validity) Returns the matched <form> tag plus the added <input> element # Modify any POST forms # Content varies with the CSRF cookie, so set the Vary header. # Since the content has been modified, any Etag will now be # incorrect. We could recalculate, but only if we assume that # the Etag was set by CommonMiddleware. The safest thing is just # to delete. See bug #9163 Django middleware that adds protection against Cross Site Request Forgeries by adding hidden form fields to POST forms and checking requests for the correct value. CsrfMiddleware uses two middleware, CsrfViewMiddleware and CsrfResponseMiddleware, which can be used independently. It is recommended to use only CsrfViewMiddleware and use the csrf_token template tag in templates for inserting the token. # We can't just inherit from CsrfViewMiddleware and CsrfResponseMiddleware # because both have process_response methods. # We must do the response post-processing first, because that calls # get_token(), which triggers a flag saying that the CSRF cookie needs # to be sent (done in CsrfViewMiddleware.process_response) | 2.375316 | 2 |
unittests/transfer_ownership_tester.py | asford/pyplusplus | 3 | 6633044 | <reponame>asford/pyplusplus
# Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import sys
import unittest
import fundamental_tester_base
from pyplusplus import code_creators
from pyplusplus import function_transformers as ft
from pyplusplus.module_builder import call_policies
decref_code = \
"""if (this->m_pyobj) {
//Py_DECREF(this->m_pyobj);
this->m_pyobj = 0;
}"""
incref_code = \
"""
if( !this->m_pyobj) {
this->m_pyobj = boost::python::detail::wrapper_base_::get_owner(*this);
Py_INCREF(this->m_pyobj);
}
"""
impl_conv_code = \
"""
boost::python::implicitly_convertible< std::auto_ptr< %(from)s >, std::auto_ptr< %(to)s > >();
"""
class tester_t(fundamental_tester_base.fundamental_tester_base_t):
EXTENSION_NAME = 'transfer_ownership'
def __init__( self, *args ):
fundamental_tester_base.fundamental_tester_base_t.__init__(
self
, tester_t.EXTENSION_NAME
, *args )
def customize( self, mb ):
event_clss = mb.classes( lambda cls: cls.name in ( 'event_t', 'do_nothing_t' ) )
for cls in event_clss:
cls.add_destructor_code( decref_code )
cls.add_wrapper_code( 'PyObject* m_pyobj;' )
cls.set_constructors_body( 'm_pyobj=0;' )
cls.mem_fun( 'notify' ).add_override_precall_code( incref_code )
cls.mem_fun( 'notify' ).add_default_precall_code( incref_code )
cls.held_type = 'std::auto_ptr< %s >' % cls.wrapper_alias
cls.add_registration_code( impl_conv_code % { 'from' : cls.wrapper_alias
, 'to' : cls.decl_string }
, False)
for base in cls.recursive_bases:
if base.access_type == 'public':
cls.add_registration_code( #from class to its base
impl_conv_code % { 'from' : cls.decl_string
, 'to' : base.related_class.decl_string }
, False)
                    cls.add_registration_code( #from wrapper to the class's base class
impl_conv_code % { 'from' : cls.wrapper_alias
, 'to' : base.related_class.decl_string }
, False)
schedule = mb.mem_fun( 'schedule' )
schedule.add_transformation( ft.transfer_ownership(0), alias='schedule' )
simulator = mb.class_( 'simulator_t' )
simulator.mem_fun( 'get_event' ).call_policies \
= call_policies.return_internal_reference()
def run_tests( self, module):
class py_event_t( module.event_t ):
def __init__( self, container ):
module.event_t.__init__( self )
self.container = container
def notify( self ):
self.container.append( 1 )
print('1')
notify_data = []
simulator = module.simulator_t()
print('2')
event = py_event_t( notify_data )
print('3')
simulator.schedule( event )
print('refcount: ', sys.getrefcount( event ))
print('4')
del event
print('5')
simulator.run()
print('6')
self.failUnless( notify_data[0] == 1 )
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite()
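# Added summary (commentary only): run_tests schedules a Python-derived
# event, deletes the last Python reference, and only then runs the
# simulator. The test passes only if ownership really moved to the C++ side
# via ft.transfer_ownership and the Python override of notify() still fired,
# which is what the final failUnless on notify_data checks.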
ex03/hello.py | Juju-62q/docker-handson | 1 | 6633045 | <filename>ex03/hello.py<gh_stars>1-10
print ("Hello Dockerfile COPY command!")
src/valid_num.py | qtKite/leetcode-submissions | 0 | 6633046 | def isNumber(s):
# can only contain 1 e
# only one decimal
# no other chars allowed
# only one sign bit which is the left most
# can contain spaces on left and right and not in between
s = s.strip()
decimal_count = 0
e_count = 0
e_pos = 0
sign_count = 0
sign_pos = 0
index = 0
num_arr = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
has_error = False
num_of_nums = 0
for i in s:
if i == '-':
sign_count += 1
sign_pos = index
if i == '+':
sign_count += 1
sign_pos = index
if i not in num_arr:
if i != 'e':
if i != '.':
has_error = True
else:
e_count += 1
e_pos = index
else:
num_of_nums += 1
if i == '.':
decimal_count += 1
index += 1
if e_count > 1:
print("1")
has_error = True
if e_count == 1 and num_of_nums == 0:
print("2")
has_error = True
if e_count >= 1 and e_pos == 0:
print("3")
has_error = True
if sign_count > 1:
print("4")
has_error = True
if sign_count == 1 and sign_pos != 0:
print("5")
has_error = True
if decimal_count > 1:
print("6")
has_error = True
if num_of_nums == 0:
print("7")
has_error = True
return not has_error
print(isNumber('-1.'))
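# --- Illustrative cross-check (not part of the original submission) ---
# A float()-based probe accepts most of the inputs this hand-rolled checker
# targets. LeetCode's "Valid Number" rules differ in corner cases (bare
# signs, 'inf'/'nan'), so treat this as a rough sanity check rather than a
# replacement for isNumber.
def _looks_like_float(s):
    try:
        float(s)
    except ValueError:
        return False
    # float() also parses infinities and NaN, which the puzzle rejects.
    return s.strip().lower() not in ('inf', '+inf', '-inf', 'infinity', 'nan')

print(_looks_like_float('-1.'), _looks_like_float('1e-5'), _looks_like_float('abc'))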
mmtbx/disorder/analyze_model.py | hbrunie/cctbx_project | 2 | 6633047 | <filename>mmtbx/disorder/analyze_model.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function
from mmtbx.disorder import backbone
from scitbx.array_family import flex
from scitbx.matrix import col
from libtbx.str_utils import format_value as fv
from libtbx import Auto, slots_getstate_setstate
import math
import sys
from six.moves import range
# XXX in order to make this run in parallel over many PDB IDs, I need to cheat
# slightly and substitute pickle-able objects for the original classes in
# iotbx.pdb.hierarchy. Note that parent relationships will be lost in the
# process.
class residue_group_proxy(slots_getstate_setstate):
"""Pickle-able stand-in for iotbx.pdb.hierarchy.residue_group."""
__slots__ = ["resseq", "icode", "_atom_groups", "_id_str", ]
def __init__(self, residue_group):
self.resseq = residue_group.resseq
self.icode = residue_group.icode
self._id_str = residue_group.id_str()
self._atom_groups = [ ]
for ag in residue_group.atom_groups():
self._atom_groups.append(atom_group_proxy(ag))
def id_str(self):
return self._id_str
def atom_groups(self):
return self._atom_groups
class atom_group_proxy(slots_getstate_setstate):
"""Pickle-able stand-in for iotbx.pdb.hierarchy.atom_group."""
__slots__ = [ "resname", "altloc", "_atoms", ]
def __init__(self, atom_group):
self.resname = atom_group.resname
self.altloc = atom_group.altloc
self._atoms = atoms_proxy(atom_group.atoms())
def atoms(self):
return self._atoms
class atoms_proxy(slots_getstate_setstate):
"""
Pickle-able stand-in for af::shared<atom> array, using the atom_with_labels
objects as elements.
"""
__slots__ = [ "_atoms" ]
def __init__(self, atoms):
self._atoms = [ a.fetch_labels() for a in atoms ]
def __getitem__(self, idx):
return self._atoms[idx]
def extract_occ(self):
return flex.double([ a.occ for a in self._atoms ])
def extract_b(self):
return flex.double([ a.b for a in self._atoms ])
class disordered_segment(object):
"""
A group of one or more adjacent residues presumed to form continuous
alternate conformations.
"""
def __init__(self, residue_group):
self.residue_groups = [ ]
self.outliers = {}
self.rotamers = {}
self.ramachandran = {}
self.backrubs = []
self.append_residue_group(residue_group)
def __str__(self):
if (self.n_residues() == 1):
return self.residue_groups[0].id_str()
else :
return "%s --> %s" % (self.residue_groups[0].id_str(),
self.residue_groups[-1].id_str())
def show(self, prefix="", out=sys.stdout):
if (self.n_residues() == 1):
print(prefix + "Segment: 1 residue (%s), %d conformers" % \
(self.residue_groups[0].id_str(), self.n_confs()), file=out)
else :
print(prefix+"Segment: %d residues (%s --> %s), %d conformers" %\
(self.n_residues(), self.residue_groups[0].id_str(),
self.residue_groups[-1].id_str(), self.n_confs()), file=out)
for i_res, rg in enumerate(self.residue_groups):
print(prefix+" residue_group=%s" % rg.id_str(), file=out)
for ag in rg.atom_groups():
rama = rota = None
for o in self.ramachandran.get(rg.id_str(), []):
if (o.altloc == ag.altloc):
rama = o
break
for o in self.rotamers.get(rg.id_str(), []):
if (o.altloc == ag.altloc):
rota = o
break
print(prefix + " " + \
"atom_group=%1s %3s occ=%.2f phi=%-6s psi=%-6s rot=%-7s" %\
(ag.altloc,
ag.resname,
flex.mean(ag.atoms().extract_occ()),
fv("%.1f", getattr(rama, "phi", None)),
fv("%.1f", getattr(rama, "psi", None)),
getattr(rota, "rotamer_name", None)), file=out)
if (len(self.backrubs[i_res]) > 0):
for backrub in self.backrubs[i_res] :
backrub.show(out=out, prefix=prefix+" ")
outliers = self.outliers[rg.id_str()]
if (len(outliers) > 0):
print(prefix+" MolProbity outliers:", file=out)
for outlier in outliers :
print(prefix+" %s: %s" % (type(outlier).__name__,
str(outlier)), file=out)
def get_previous_conformer(self, index=0):
rg = self.residue_groups[-1]
i_group = 0
for atom_group in rg.atom_groups():
if (atom_group.altloc.strip() != ''):
if (i_group == index):
return atom_group
else :
i_group += 1
return None
def is_part_of_segment(self, other,
ignore_inconsistent_occupancy=False,
ignore_inconsistent_n_conformers=False,
max_peptide_bond_distance_within_conformer=2.0):
"""
Determine whether a residue_group object is part of the same continuous
disordered segment. The precise meaning of this can be adjusted depending
on user preferences; by default a continuous segment must have the same
number of conformers for each residue, and occupancies must be constrained
for each conformation. The latter assumption will probably be violated
most often.
"""
other_groups = other.atom_groups()
assert len(other_groups) >= 2
if (len(other_groups) != len(self.residue_groups[-1].atom_groups())):
if (not ignore_inconsistent_n_conformers):
return False
i_group = 0
for atom_group in other_groups :
if (atom_group.altloc != ''):
other_atoms = atom_group.atoms()
prev_group = self.get_previous_conformer(index=i_group)
if (prev_group is None):
assert ignore_inconsistent_n_conformers
break
i_group += 1
if (prev_group.altloc != atom_group.altloc):
return False
prev_atoms = prev_group.atoms()
if (prev_atoms[0].occ != other_atoms[0].occ):
if (not ignore_inconsistent_occupancy):
return False
curr_n, prev_c = None, None
for atom in prev_atoms :
if (atom.name == " C "):
prev_c = atom.xyz
break
for atom in other_atoms :
if (atom.name == " N "):
curr_n = atom.xyz
break
if (curr_n is None) or (prev_c is None):
return False
dist = abs(col(curr_n) - col(prev_c))
if (dist > max_peptide_bond_distance_within_conformer):
return False
return True
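  # Added commentary: in short, the incoming residue_group extends the
  # current segment only if its altloc labels line up with the previous
  # residue's conformers (and the conformer counts match unless explicitly
  # ignored), the first-atom occupancies agree (unless ignored), and the
  # previous C to current N distance stays within the 2.0 A default
  # peptide-bond cutoff.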
def append_residue_group(self, rg):
self.residue_groups.append(residue_group_proxy(rg))
rg_backrubs = backbone.find_backrubs(residue_group=rg)
self.backrubs.append(rg_backrubs)
def detect_sequence_disorder(self):
"""
Find any residue groups with heterogeneous chemical identity.
"""
disordered = []
for rg in self.residue_groups :
resnames = set([ ag.resname.upper() for ag in rg.atom_groups() ])
if (len(resnames) > 1):
disordered.append((rg.id_str(), sorted(list(resnames))))
return disordered
def n_residues(self):
return len(self.residue_groups)
def n_partial_splits(self, join_at_calpha=False):
"""
Count the number of residues where not all atoms have alternates.
"""
n_partial = 0
for residue_group in self.residue_groups :
for atom_group in residue_group.atom_groups():
if (atom_group.altloc.strip() == ''):
if (join_at_calpha):
for atom in atom_group.atoms():
if (atom.name == " CA "):
n_partial += 1
break
else :
n_partial += 1
break
return n_partial
def n_confs(self):
"""
Count the number of alternate conformations. Sometimes this may not be
the same for all residue groups, in which case a list is returned.
"""
all_n_confs = []
for residue_group in self.residue_groups :
all_n_confs.append(0)
for atom_group in residue_group.atom_groups():
if (atom_group.altloc.strip() != ''):
all_n_confs[-1] += 1
all_n_confs_uniq = set(all_n_confs)
if (len(all_n_confs_uniq) != 1):
return sorted(list(all_n_confs_uniq))
return all_n_confs_uniq.pop()
def n_confs_max(self):
n_confs = self.n_confs()
if isinstance(n_confs, int):
return n_confs
return max(n_confs)
def minimum_atom_group_occupancy(self):
occ_min = 1.
for rg in self.residue_groups :
for ag in rg.atom_groups():
ag_atoms = ag.atoms()
total = 0
n_non_hd = 0
for atom in ag.atoms():
if (atom.element.strip() not in ["H", "D"]) and (atom.occ != 0):
total += atom.occ
n_non_hd += 1
if (total != 0):
occ_mean_ag = total / n_non_hd
occ_min = min(occ_min, occ_mean_ag)
return occ_min
def get_all_conformer_distances(self, backbone=None):
n_confs = self.n_confs()
assert isinstance(n_confs, int)
pairwise_distances = []
for i_conf in range(n_confs - 1):
indices = [i_conf, i_conf + 1]
pairwise_distances.append(self.get_conformer_distances(
conformer_indices=indices,
backbone=backbone))
return pairwise_distances
def get_conformer_distances(self,
conformer_indices=Auto,
backbone=None):
"""
Calculate the distances between atoms in the specified pair of conformers
(must be present for all residue groups).
"""
# XXX the way this is handled is somewhat clumsy, but necessary because
# there is no requirement that atom groups have the same number of atoms or
# even the same chemical identity (although they are assumed to be amino
# acids)
distances = []
for rg in self.residue_groups :
i_ag = 0
atom_groups = rg.atom_groups()
if (conformer_indices is Auto):
if (atom_groups[0].altloc.strip() == ''):
if (len(atom_groups) <= 2):
continue
else :
conformer_indices = (1,2)
else :
conformer_indices = (0,1)
else :
assert (len(conformer_indices) == 2)
ag1 = rg.atom_groups()[conformer_indices[0]]
ag2 = rg.atom_groups()[conformer_indices[1]]
if ((ag1.altloc.strip() == '') and
(conformer_indices[0] == 0) and
(len(atom_groups) >= 3)):
ag1 = rg.atom_groups()[conformer_indices[0]+1]
ag2 = rg.atom_groups()[conformer_indices[1]+1]
for atom1 in ag1.atoms():
name = atom1.name.strip()
element = atom1.element.upper().strip()
if (element in ["H","D"]):
continue
if (backbone is not None):
if (((backbone) and (not name in ["C","CA","N","O"])) or
((not backbone) and (name in ["C","CA","N","O"]))):
continue
for atom2 in ag2.atoms():
if (atom1.name == atom2.name):
distances.append(abs(col(atom1.xyz) - col(atom2.xyz)))
return distances
def max_distance_between_conformers(self, backbone=None):
paired_distances = self.get_all_conformer_distances(backbone=backbone)
paired_max = []
for distances in paired_distances :
if (len(distances) > 0):
paired_max.append(max(distances))
if (len(paired_max) > 0):
return max(paired_max)
return None
def max_rmsd_between_conformers(self, backbone=None):
paired_distances = self.get_all_conformer_distances(backbone=backbone)
rmsd_max = None
for distances in paired_distances :
if (len(distances) == 0):
continue
rmsd = math.sqrt(sum([ dxyz**2 for dxyz in distances]) / len(distances))
if (rmsd_max is None) or (rmsd > rmsd_max):
rmsd_max = rmsd
return rmsd_max
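  # Added note: for each adjacent conformer pair this computes
  # RMSD = sqrt( sum_i(d_i**2) / N ) over the N matched non-H atom pairs,
  # and the method returns the largest such value found for the segment.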
def extract_validation_results(self, multi_criterion):
"""
Find the matching validation result objects from the multi-criterion
object (see mmtbx/validation/molprobity/__init__.py).
"""
for rg in self.residue_groups :
self.outliers[rg.id_str()] = []
self.rotamers[rg.id_str()] = []
self.ramachandran[rg.id_str()] = []
results = multi_criterion.get_residue_group_data(rg)
for result in results.outliers :
if result.is_outlier():
self.outliers[rg.id_str()].append(result)
if type(result).__name__ == "rotamer" :
self.rotamers[rg.id_str()].append(result)
elif type(result).__name__ == "ramachandran" :
self.ramachandran[rg.id_str()].append(result)
def n_rotamer_changes(self, resname=None):
n_changes = 0
for rg in self.residue_groups :
resnames = set([ ag.resname.upper() for ag in rg.atom_groups() ])
if (len(resnames) > 1):
continue
elif (resname is not None) and (resnames.pop() != resname.upper()):
continue
rotamers = set(self.rotamers.get(rg.id_str(), []))
if (len(rotamers) > 1):
n_changes += 1
return n_changes
def find_peptide_flips(self, angle_cutoff=150):
residues_and_angles = []
for rg in self.residue_groups :
peptide_angle = carbonyl_oxygen_angle(rg)
if (peptide_angle >= angle_cutoff):
residues_and_angles.append((rg.id_str(), peptide_angle))
return residues_and_angles
def n_cbeta_outliers(self):
return self.n_outliers_of_type(analysis_type='cbeta')
def n_outliers_of_type(self, analysis_type):
n_outliers = 0
for rg in self.residue_groups :
results = self.outliers.get(rg.id_str(), [])
for result in results :
if (type(result).__name__ == analysis_type) and result.is_outlier():
n_outliers += 1
return n_outliers
#-----------------------------------------------------------------------
# utility methods
def is_joined_at_calpha(residue_group):
for atom_group in residue_group.atom_groups():
if (atom_group.altloc.strip() == ''):
for atom in atom_group.atoms():
if (atom.name == " CA "):
return True
return False
def carbonyl_oxygen_angle(residue_group):
"""
Calculate angles between carbonyl oxygen (C=O) bonds in each pair of atom
groups, and return the maximum value (or None if fewer than two such bonds
are found).
"""
c_o_vectors = []
for atom_group in residue_group.atom_groups():
c_xyz = o_xyz = None
for atom in atom_group.atoms():
if (atom.name.strip() == "O"):
o_xyz = col(atom.xyz)
elif (atom.name.strip() == "C"):
c_xyz = col(atom.xyz)
if (not None in [c_xyz, o_xyz]):
c_o_vectors.append(c_xyz - o_xyz)
if (len(c_o_vectors) >= 2):
angles = []
i_ag = 0
while (i_ag < len(c_o_vectors) - 1):
angles.append(c_o_vectors[i_ag].angle(c_o_vectors[i_ag+1], deg=True))
i_ag += 1
return max(angles)
return None
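# Added note: the vectors compared here run from O to C of the same peptide
# carbonyl in different conformers; find_peptide_flips() treats an angle of
# 150 degrees or more between them as an apparent peptide flip.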
def only_amide_hydrogen_split(residue_group):
"""
Detect cases where the only alternate conformation is for the amide hydrogen,
presumably because the previous residue was split and Reduce was used to
add hydrogens. These residues are ignored in our analyses.
"""
for atom in residue_group.atoms():
labels = atom.fetch_labels()
if (labels.altloc.strip() != '') and (atom.name != " H "):
return False
return True
# XXX unused?
def get_nconfs(pdb_hierarchy):
"""
Count the number of conformers in a structure.
"""
  n_confs = -1 # also the starting value for the single-model scan below
  if (len(pdb_hierarchy.models()) > 1):
n_confs = -1 # multiple MODELs aren't handled
else :
for chain in pdb_hierarchy.only_model().chains():
if (chain.is_protein()):
confs = chain.conformers()
if (len(confs) > n_confs):
n_confs = len(confs)
return n_confs
#-----------------------------------------------------------------------
class process_residue_groups(object):
def __init__(self, chain,
multi_criterion_validation=None,
ignore_inconsistent_occupancy=False,
log=sys.stdout):
self.segments = []
self.chain_id = chain.id
self.n_residue_groups = 0
self.n_disordered = 0
self.residue_counts = {}
self.disordered_residue_counts = {}
assert chain.is_protein()
segment = None
for residue_group in chain.residue_groups():
self.n_residue_groups += 1
atom_groups = residue_group.atom_groups()
resname_1 = atom_groups[0].resname
if (not resname_1 in self.residue_counts):
self.residue_counts[resname_1] = 0
self.residue_counts[resname_1] += 1
if (len(atom_groups) > 1):
self.n_disordered += 1
if only_amide_hydrogen_split(residue_group):
print(" residue %s only has alt. confs. for H" % \
residue_group.id_str(), file=log)
segment = None
continue
else :
if (not resname_1 in self.disordered_residue_counts):
self.disordered_residue_counts[resname_1] = 0
self.disordered_residue_counts[resname_1] += 1
if (segment is None):
segment = disordered_segment(residue_group)
self.segments.append(segment)
else :
if segment.is_part_of_segment(other=residue_group,
ignore_inconsistent_occupancy=ignore_inconsistent_occupancy):
segment.append_residue_group(residue_group)
else :
segment = disordered_segment(residue_group)
self.segments.append(segment)
else :
segment = None
if (multi_criterion_validation is not None):
for segment in self.segments :
segment.extract_validation_results(multi_criterion_validation)
def show(self, prefix="", out=sys.stdout):
print(prefix+"Chain '%s': %d residues, %d disordered" % (
self.chain_id, self.n_residue_groups, self.n_disordered), file=out)
for segment in self.segments :
segment.show(out=out, prefix=prefix+" ")
class process_pdb_hierarchy(object):
def __init__(self, pdb_hierarchy,
validation,
ignore_inconsistent_occupancy=False,
log=sys.stdout):
self.chains = []
self.n_residue_groups = 0
self.n_disordered = 0
self.sequence_disorder = []
self.n_rama_outliers = validation.ramalyze.n_outliers
self.n_rota_outliers = validation.rotalyze.n_outliers
self.n_cbeta_outliers = validation.cbetadev.n_outliers
multi_criterion_validation = None
if (validation is not None):
multi_criterion_validation = validation.as_multi_criterion_view()
for chain in pdb_hierarchy.only_model().chains():
if (chain.is_protein()):
print(" processing chain '%s'" % chain.id, file=log)
chain_info = process_residue_groups(chain=chain,
multi_criterion_validation=multi_criterion_validation,
ignore_inconsistent_occupancy=ignore_inconsistent_occupancy,
log=log)
self.chains.append(chain_info)
self.n_residue_groups += chain_info.n_residue_groups
self.n_disordered += chain_info.n_disordered
for segment in chain_info.segments :
self.sequence_disorder.extend(segment.detect_sequence_disorder())
else :
print(" skipping non-protein chain '%s'" % chain.id, file=log)
# TODO post-analysis
@property
def segments(self):
for chain in self.chains :
for segment in chain.segments :
yield segment
def max_rmsd_between_conformers(self, backbone=None):
rmsd_max = segment_max = None
for segment in self.segments :
rmsd = segment.max_rmsd_between_conformers(backbone=backbone)
if (rmsd_max is None) or (rmsd > rmsd_max):
rmsd_max = rmsd
segment_max = segment
return rmsd_max, segment_max
def max_distance_between_conformers(self, backbone=None):
dist_max = segment_max = None
for segment in self.segments :
dist = segment.max_distance_between_conformers(backbone=backbone)
if (dist_max is None) or (dist > dist_max):
dist_max = dist
segment_max = segment
return dist_max, segment_max
def show(self, out=sys.stdout, verbose=True):
print("", file=out)
print("Overall: %d protein chain(s)" % len(self.chains), file=out)
print(" %d residues" % self.n_residue_groups, file=out)
print(" %d disorered in %d segments" % (self.n_disordered,
sum([ len(c.segments) for c in self.chains ])), file=out)
if (len(self.sequence_disorder) > 0):
print("%d heterogeneous residues:" % len(self.sequence_disorder), file=out)
for rg_id, resnames in self.sequence_disorder :
print(" %s (%s)" % (rg_id, ",".join(resnames)))
n_rotamer_changes = n_cbeta_dev = n_partial_splits = 0
peptide_flips = []
for segment in self.segments :
n_rotamer_changes += segment.n_rotamer_changes()
n_cbeta_dev += segment.n_cbeta_outliers()
n_partial_splits += segment.n_partial_splits(join_at_calpha=True)
peptide_flips.extend(segment.find_peptide_flips())
print("%d disordered residues have multiple rotamers" % \
n_rotamer_changes, file=out)
if (n_partial_splits > 0):
print("%d disordered residues have a single C-alpha atom" % \
n_partial_splits, file=out)
if (n_cbeta_dev > 0):
print("%d disordered residues have C-beta deviations" % \
n_cbeta_dev, file=out)
if (len(peptide_flips) > 0):
print("%d apparent peptide flips:", file=out)
for residue_id_str, angle in peptide_flips :
print(" %s (angle=%.1f)" % (residue_id_str, angle), file=out)
# distances and RMSDs
rmsd_max, segment_max = self.max_rmsd_between_conformers()
rmsd_mc_max, segment_mc_max = self.max_rmsd_between_conformers(
backbone=True)
assert (rmsd_max is not None)
print("Max. RMSD between conformers:", file=out)
print(" %6.3f (%s) [all non-H atoms]" % (rmsd_max, segment_max), file=out)
if (rmsd_mc_max is not None):
print(" %6.3f (%s) [backbone only]" %(rmsd_mc_max,
segment_mc_max), file=out)
dist_max, segment_max = self.max_distance_between_conformers()
dist_mc_max, segment_mc_max = self.max_distance_between_conformers(
backbone=True)
assert (dist_max is not None)
print("Max. distance between conformers:", file=out)
print(" %6.3f (%s) [all non-H atoms]" % (dist_max, segment_max), file=out)
if (dist_mc_max is not None):
print(" %6.3f (%s) [backbone only]" %(dist_mc_max,
segment_mc_max), file=out)
# verbose output
if (verbose):
for chain in self.chains :
chain.show(out=out)
else :
print("Run with --verbose to show per-residue results.", file=out)
print("", file=out)
| <filename>mmtbx/disorder/analyze_model.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function
from mmtbx.disorder import backbone
from scitbx.array_family import flex
from scitbx.matrix import col
from libtbx.str_utils import format_value as fv
from libtbx import Auto, slots_getstate_setstate
import math
import sys
from six.moves import range
# XXX in order to make this run in parallel over many PDB IDs, I need to cheat
# slightly and substitute pickle-able objects for the original classes in
# iotbx.pdb.hierarchy. Note that parent relationships will be lost in the
# process.
class residue_group_proxy(slots_getstate_setstate):
"""Pickle-able stand-in for iotbx.pdb.hierarchy.residue_group."""
__slots__ = ["resseq", "icode", "_atom_groups", "_id_str", ]
def __init__(self, residue_group):
self.resseq = residue_group.resseq
self.icode = residue_group.icode
self._id_str = residue_group.id_str()
self._atom_groups = [ ]
for ag in residue_group.atom_groups():
self._atom_groups.append(atom_group_proxy(ag))
def id_str(self):
return self._id_str
def atom_groups(self):
return self._atom_groups
class atom_group_proxy(slots_getstate_setstate):
"""Pickle-able stand-in for iotbx.pdb.hierarchy.atom_group."""
__slots__ = [ "resname", "altloc", "_atoms", ]
def __init__(self, atom_group):
self.resname = atom_group.resname
self.altloc = atom_group.altloc
self._atoms = atoms_proxy(atom_group.atoms())
def atoms(self):
return self._atoms
class atoms_proxy(slots_getstate_setstate):
"""
Pickle-able stand-in for af::shared<atom> array, using the atom_with_labels
objects as elements.
"""
__slots__ = [ "_atoms" ]
def __init__(self, atoms):
self._atoms = [ a.fetch_labels() for a in atoms ]
def __getitem__(self, idx):
return self._atoms[idx]
def extract_occ(self):
return flex.double([ a.occ for a in self._atoms ])
def extract_b(self):
return flex.double([ a.b for a in self._atoms ])
class disordered_segment(object):
"""
A group of one or more adjacent residues presumed to form continuous
alternate conformations.
"""
def __init__(self, residue_group):
self.residue_groups = [ ]
self.outliers = {}
self.rotamers = {}
self.ramachandran = {}
self.backrubs = []
self.append_residue_group(residue_group)
def __str__(self):
if (self.n_residues() == 1):
return self.residue_groups[0].id_str()
else :
return "%s --> %s" % (self.residue_groups[0].id_str(),
self.residue_groups[-1].id_str())
def show(self, prefix="", out=sys.stdout):
if (self.n_residues() == 1):
print(prefix + "Segment: 1 residue (%s), %d conformers" % \
(self.residue_groups[0].id_str(), self.n_confs()), file=out)
else :
print(prefix+"Segment: %d residues (%s --> %s), %d conformers" %\
(self.n_residues(), self.residue_groups[0].id_str(),
self.residue_groups[-1].id_str(), self.n_confs()), file=out)
for i_res, rg in enumerate(self.residue_groups):
print(prefix+" residue_group=%s" % rg.id_str(), file=out)
for ag in rg.atom_groups():
rama = rota = None
for o in self.ramachandran.get(rg.id_str(), []):
if (o.altloc == ag.altloc):
rama = o
break
for o in self.rotamers.get(rg.id_str(), []):
if (o.altloc == ag.altloc):
rota = o
break
print(prefix + " " + \
"atom_group=%1s %3s occ=%.2f phi=%-6s psi=%-6s rot=%-7s" %\
(ag.altloc,
ag.resname,
flex.mean(ag.atoms().extract_occ()),
fv("%.1f", getattr(rama, "phi", None)),
fv("%.1f", getattr(rama, "psi", None)),
getattr(rota, "rotamer_name", None)), file=out)
if (len(self.backrubs[i_res]) > 0):
for backrub in self.backrubs[i_res] :
backrub.show(out=out, prefix=prefix+" ")
outliers = self.outliers[rg.id_str()]
if (len(outliers) > 0):
print(prefix+" MolProbity outliers:", file=out)
for outlier in outliers :
print(prefix+" %s: %s" % (type(outlier).__name__,
str(outlier)), file=out)
def get_previous_conformer(self, index=0):
rg = self.residue_groups[-1]
i_group = 0
for atom_group in rg.atom_groups():
if (atom_group.altloc.strip() != ''):
if (i_group == index):
return atom_group
else :
i_group += 1
return None
def is_part_of_segment(self, other,
ignore_inconsistent_occupancy=False,
ignore_inconsistent_n_conformers=False,
max_peptide_bond_distance_within_conformer=2.0):
"""
Determine whether a residue_group object is part of the same continuous
disordered segment. The precise meaning of this can be adjusted depending
on user preferences; by default a continuous segment must have the same
number of conformers for each residue, and occupancies must be constrained
for each conformation. The latter assumption will probably be violated
most often.
"""
other_groups = other.atom_groups()
assert len(other_groups) >= 2
if (len(other_groups) != len(self.residue_groups[-1].atom_groups())):
if (not ignore_inconsistent_n_conformers):
return False
i_group = 0
for atom_group in other_groups :
if (atom_group.altloc != ''):
other_atoms = atom_group.atoms()
prev_group = self.get_previous_conformer(index=i_group)
if (prev_group is None):
assert ignore_inconsistent_n_conformers
break
i_group += 1
if (prev_group.altloc != atom_group.altloc):
return False
prev_atoms = prev_group.atoms()
if (prev_atoms[0].occ != other_atoms[0].occ):
if (not ignore_inconsistent_occupancy):
return False
curr_n, prev_c = None, None
for atom in prev_atoms :
if (atom.name == " C "):
prev_c = atom.xyz
break
for atom in other_atoms :
if (atom.name == " N "):
curr_n = atom.xyz
break
if (curr_n is None) or (prev_c is None):
return False
dist = abs(col(curr_n) - col(prev_c))
if (dist > max_peptide_bond_distance_within_conformer):
return False
return True
def append_residue_group(self, rg):
self.residue_groups.append(residue_group_proxy(rg))
rg_backrubs = backbone.find_backrubs(residue_group=rg)
self.backrubs.append(rg_backrubs)
def detect_sequence_disorder(self):
"""
Find any residue groups with heterogeneous chemical identity.
"""
disordered = []
for rg in self.residue_groups :
resnames = set([ ag.resname.upper() for ag in rg.atom_groups() ])
if (len(resnames) > 1):
disordered.append((rg.id_str(), sorted(list(resnames))))
return disordered
def n_residues(self):
return len(self.residue_groups)
def n_partial_splits(self, join_at_calpha=False):
"""
Count the number of residues where not all atoms have alternates.
"""
n_partial = 0
for residue_group in self.residue_groups :
for atom_group in residue_group.atom_groups():
if (atom_group.altloc.strip() == ''):
if (join_at_calpha):
for atom in atom_group.atoms():
if (atom.name == " CA "):
n_partial += 1
break
else :
n_partial += 1
break
return n_partial
def n_confs(self):
"""
Count the number of alternate conformations. Sometimes this may not be
the same for all residue groups, in which case a list is returned.
"""
all_n_confs = []
for residue_group in self.residue_groups :
all_n_confs.append(0)
for atom_group in residue_group.atom_groups():
if (atom_group.altloc.strip() != ''):
all_n_confs[-1] += 1
all_n_confs_uniq = set(all_n_confs)
if (len(all_n_confs_uniq) != 1):
return sorted(list(all_n_confs_uniq))
return all_n_confs_uniq.pop()
def n_confs_max(self):
n_confs = self.n_confs()
if isinstance(n_confs, int):
return n_confs
return max(n_confs)
def minimum_atom_group_occupancy(self):
occ_min = 1.
for rg in self.residue_groups :
for ag in rg.atom_groups():
ag_atoms = ag.atoms()
total = 0
n_non_hd = 0
for atom in ag.atoms():
if (atom.element.strip() not in ["H", "D"]) and (atom.occ != 0):
total += atom.occ
n_non_hd += 1
if (total != 0):
occ_mean_ag = total / n_non_hd
occ_min = min(occ_min, occ_mean_ag)
return occ_min
def get_all_conformer_distances(self, backbone=None):
n_confs = self.n_confs()
assert isinstance(n_confs, int)
pairwise_distances = []
for i_conf in range(n_confs - 1):
indices = [i_conf, i_conf + 1]
pairwise_distances.append(self.get_conformer_distances(
conformer_indices=indices,
backbone=backbone))
return pairwise_distances
def get_conformer_distances(self,
conformer_indices=Auto,
backbone=None):
"""
Calculate the distances between atoms in the specified pair of conformers
(must be present for all residue groups).
"""
# XXX the way this is handled is somewhat clumsy, but necessary because
# there is no requirement that atom groups have the same number of atoms or
# even the same chemical identity (although they are assumed to be amino
# acids)
distances = []
for rg in self.residue_groups :
i_ag = 0
atom_groups = rg.atom_groups()
if (conformer_indices is Auto):
if (atom_groups[0].altloc.strip() == ''):
if (len(atom_groups) <= 2):
continue
else :
conformer_indices = (1,2)
else :
conformer_indices = (0,1)
else :
assert (len(conformer_indices) == 2)
ag1 = rg.atom_groups()[conformer_indices[0]]
ag2 = rg.atom_groups()[conformer_indices[1]]
if ((ag1.altloc.strip() == '') and
(conformer_indices[0] == 0) and
(len(atom_groups) >= 3)):
ag1 = rg.atom_groups()[conformer_indices[0]+1]
ag2 = rg.atom_groups()[conformer_indices[1]+1]
for atom1 in ag1.atoms():
name = atom1.name.strip()
element = atom1.element.upper().strip()
if (element in ["H","D"]):
continue
if (backbone is not None):
if (((backbone) and (not name in ["C","CA","N","O"])) or
((not backbone) and (name in ["C","CA","N","O"]))):
continue
for atom2 in ag2.atoms():
if (atom1.name == atom2.name):
distances.append(abs(col(atom1.xyz) - col(atom2.xyz)))
return distances
def max_distance_between_conformers(self, backbone=None):
paired_distances = self.get_all_conformer_distances(backbone=backbone)
paired_max = []
for distances in paired_distances :
if (len(distances) > 0):
paired_max.append(max(distances))
if (len(paired_max) > 0):
return max(paired_max)
return None
def max_rmsd_between_conformers(self, backbone=None):
paired_distances = self.get_all_conformer_distances(backbone=backbone)
rmsd_max = None
for distances in paired_distances :
if (len(distances) == 0):
continue
rmsd = math.sqrt(sum([ dxyz**2 for dxyz in distances]) / len(distances))
if (rmsd_max is None) or (rmsd > rmsd_max):
rmsd_max = rmsd
return rmsd_max
def extract_validation_results(self, multi_criterion):
"""
Find the matching validation result objects from the multi-criterion
object (see mmtbx/validation/molprobity/__init__.py).
"""
for rg in self.residue_groups :
self.outliers[rg.id_str()] = []
self.rotamers[rg.id_str()] = []
self.ramachandran[rg.id_str()] = []
results = multi_criterion.get_residue_group_data(rg)
for result in results.outliers :
if result.is_outlier():
self.outliers[rg.id_str()].append(result)
if type(result).__name__ == "rotamer" :
self.rotamers[rg.id_str()].append(result)
elif type(result).__name__ == "ramachandran" :
self.ramachandran[rg.id_str()].append(result)
def n_rotamer_changes(self, resname=None):
n_changes = 0
for rg in self.residue_groups :
resnames = set([ ag.resname.upper() for ag in rg.atom_groups() ])
if (len(resnames) > 1):
continue
elif (resname is not None) and (resnames.pop() != resname.upper()):
continue
rotamers = set(self.rotamers.get(rg.id_str(), []))
if (len(rotamers) > 1):
n_changes += 1
return n_changes
def find_peptide_flips(self, angle_cutoff=150):
residues_and_angles = []
for rg in self.residue_groups :
peptide_angle = carbonyl_oxygen_angle(rg)
if (peptide_angle >= angle_cutoff):
residues_and_angles.append((rg.id_str(), peptide_angle))
return residues_and_angles
def n_cbeta_outliers(self):
return self.n_outliers_of_type(analysis_type='cbeta')
def n_outliers_of_type(self, analysis_type):
n_outliers = 0
for rg in self.residue_groups :
results = self.outliers.get(rg.id_str(), [])
for result in results :
if (type(result).__name__ == analysis_type) and result.is_outlier():
n_outliers += 1
return n_outliers
#-----------------------------------------------------------------------
# utility methods
def is_joined_at_calpha(residue_group):
for atom_group in residue_group.atom_groups():
if (atom_group.altloc.strip() == ''):
for atom in atom_group.atoms():
if (atom.name == " CA "):
return True
return False
def carbonyl_oxygen_angle(residue_group):
"""
Calculate angles between carbonyl oxygen (C=O) bonds in each pair of atom
groups, and return the maximum value (or None if fewer than two such bonds
are found).
"""
c_o_vectors = []
for atom_group in residue_group.atom_groups():
c_xyz = o_xyz = None
for atom in atom_group.atoms():
if (atom.name.strip() == "O"):
o_xyz = col(atom.xyz)
elif (atom.name.strip() == "C"):
c_xyz = col(atom.xyz)
if (not None in [c_xyz, o_xyz]):
c_o_vectors.append(c_xyz - o_xyz)
if (len(c_o_vectors) >= 2):
angles = []
i_ag = 0
while (i_ag < len(c_o_vectors) - 1):
angles.append(c_o_vectors[i_ag].angle(c_o_vectors[i_ag+1], deg=True))
i_ag += 1
return max(angles)
return None
def only_amide_hydrogen_split(residue_group):
"""
Detect cases where the only alternate conformation is for the amide hydrogen,
presumably because the previous residue was split and Reduce was used to
add hydrogens. These residues are ignored in our analyses.
"""
for atom in residue_group.atoms():
labels = atom.fetch_labels()
if (labels.altloc.strip() != '') and (atom.name != " H "):
return False
return True
# XXX unused?
def get_nconfs(pdb_hierarchy):
"""
Count the number of conformers in a structure.
"""
if (len(pdb_hierarchy.models()) > 1):
n_confs = -1 # multiple MODELs aren't handled
else :
for chain in pdb_hierarchy.only_model().chains():
if (chain.is_protein()):
confs = chain.conformers()
if (len(confs) > n_confs):
n_confs = len(confs)
return n_confs
#-----------------------------------------------------------------------
class process_residue_groups(object):
def __init__(self, chain,
multi_criterion_validation=None,
ignore_inconsistent_occupancy=False,
log=sys.stdout):
self.segments = []
self.chain_id = chain.id
self.n_residue_groups = 0
self.n_disordered = 0
self.residue_counts = {}
self.disordered_residue_counts = {}
assert chain.is_protein()
segment = None
for residue_group in chain.residue_groups():
self.n_residue_groups += 1
atom_groups = residue_group.atom_groups()
resname_1 = atom_groups[0].resname
if (not resname_1 in self.residue_counts):
self.residue_counts[resname_1] = 0
self.residue_counts[resname_1] += 1
if (len(atom_groups) > 1):
self.n_disordered += 1
if only_amide_hydrogen_split(residue_group):
print(" residue %s only has alt. confs. for H" % \
residue_group.id_str(), file=log)
segment = None
continue
else :
if (not resname_1 in self.disordered_residue_counts):
self.disordered_residue_counts[resname_1] = 0
self.disordered_residue_counts[resname_1] += 1
if (segment is None):
segment = disordered_segment(residue_group)
self.segments.append(segment)
else :
if segment.is_part_of_segment(other=residue_group,
ignore_inconsistent_occupancy=ignore_inconsistent_occupancy):
segment.append_residue_group(residue_group)
else :
segment = disordered_segment(residue_group)
self.segments.append(segment)
else :
segment = None
if (multi_criterion_validation is not None):
for segment in self.segments :
segment.extract_validation_results(multi_criterion_validation)
def show(self, prefix="", out=sys.stdout):
print(prefix+"Chain '%s': %d residues, %d disordered" % (
self.chain_id, self.n_residue_groups, self.n_disordered), file=out)
for segment in self.segments :
segment.show(out=out, prefix=prefix+" ")
class process_pdb_hierarchy(object):
def __init__(self, pdb_hierarchy,
validation,
ignore_inconsistent_occupancy=False,
log=sys.stdout):
self.chains = []
self.n_residue_groups = 0
self.n_disordered = 0
self.sequence_disorder = []
    self.n_rama_outliers = self.n_rota_outliers = self.n_cbeta_outliers = 0
    multi_criterion_validation = None
    if (validation is not None):
      self.n_rama_outliers = validation.ramalyze.n_outliers
      self.n_rota_outliers = validation.rotalyze.n_outliers
      self.n_cbeta_outliers = validation.cbetadev.n_outliers
      multi_criterion_validation = validation.as_multi_criterion_view()
for chain in pdb_hierarchy.only_model().chains():
if (chain.is_protein()):
print(" processing chain '%s'" % chain.id, file=log)
chain_info = process_residue_groups(chain=chain,
multi_criterion_validation=multi_criterion_validation,
ignore_inconsistent_occupancy=ignore_inconsistent_occupancy,
log=log)
self.chains.append(chain_info)
self.n_residue_groups += chain_info.n_residue_groups
self.n_disordered += chain_info.n_disordered
for segment in chain_info.segments :
self.sequence_disorder.extend(segment.detect_sequence_disorder())
else :
print(" skipping non-protein chain '%s'" % chain.id, file=log)
# TODO post-analysis
@property
def segments(self):
for chain in self.chains :
for segment in chain.segments :
yield segment
def max_rmsd_between_conformers(self, backbone=None):
rmsd_max = segment_max = None
for segment in self.segments :
rmsd = segment.max_rmsd_between_conformers(backbone=backbone)
if (rmsd_max is None) or (rmsd > rmsd_max):
rmsd_max = rmsd
segment_max = segment
return rmsd_max, segment_max
def max_distance_between_conformers(self, backbone=None):
dist_max = segment_max = None
for segment in self.segments :
dist = segment.max_distance_between_conformers(backbone=backbone)
if (dist_max is None) or (dist > dist_max):
dist_max = dist
segment_max = segment
return dist_max, segment_max
def show(self, out=sys.stdout, verbose=True):
print("", file=out)
print("Overall: %d protein chain(s)" % len(self.chains), file=out)
print(" %d residues" % self.n_residue_groups, file=out)
print(" %d disorered in %d segments" % (self.n_disordered,
sum([ len(c.segments) for c in self.chains ])), file=out)
if (len(self.sequence_disorder) > 0):
print("%d heterogeneous residues:" % len(self.sequence_disorder), file=out)
for rg_id, resnames in self.sequence_disorder :
print(" %s (%s)" % (rg_id, ",".join(resnames)))
n_rotamer_changes = n_cbeta_dev = n_partial_splits = 0
peptide_flips = []
for segment in self.segments :
n_rotamer_changes += segment.n_rotamer_changes()
n_cbeta_dev += segment.n_cbeta_outliers()
n_partial_splits += segment.n_partial_splits(join_at_calpha=True)
peptide_flips.extend(segment.find_peptide_flips())
print("%d disordered residues have multiple rotamers" % \
n_rotamer_changes, file=out)
if (n_partial_splits > 0):
print("%d disordered residues have a single C-alpha atom" % \
n_partial_splits, file=out)
if (n_cbeta_dev > 0):
print("%d disordered residues have C-beta deviations" % \
n_cbeta_dev, file=out)
if (len(peptide_flips) > 0):
print("%d apparent peptide flips:", file=out)
for residue_id_str, angle in peptide_flips :
print(" %s (angle=%.1f)" % (residue_id_str, angle), file=out)
# distances and RMSDs
rmsd_max, segment_max = self.max_rmsd_between_conformers()
rmsd_mc_max, segment_mc_max = self.max_rmsd_between_conformers(
backbone=True)
assert (rmsd_max is not None)
print("Max. RMSD between conformers:", file=out)
print(" %6.3f (%s) [all non-H atoms]" % (rmsd_max, segment_max), file=out)
if (rmsd_mc_max is not None):
print(" %6.3f (%s) [backbone only]" %(rmsd_mc_max,
segment_mc_max), file=out)
dist_max, segment_max = self.max_distance_between_conformers()
dist_mc_max, segment_mc_max = self.max_distance_between_conformers(
backbone=True)
assert (dist_max is not None)
print("Max. distance between conformers:", file=out)
print(" %6.3f (%s) [all non-H atoms]" % (dist_max, segment_max), file=out)
if (dist_mc_max is not None):
print(" %6.3f (%s) [backbone only]" %(dist_mc_max,
segment_mc_max), file=out)
# verbose output
if (verbose):
for chain in self.chains :
chain.show(out=out)
else :
print("Run with --verbose to show per-residue results.", file=out)
print("", file=out)
| en | 0.833865 | # XXX in order to make this run in parallel over many PDB IDs, I need to cheat # slightly and substitute pickle-able objects for the original classes in # iotbx.pdb.hierarchy. Note that parent relationships will be lost in the # process. Pickle-able stand-in for iotbx.pdb.hierarchy.residue_group. Pickle-able stand-in for iotbx.pdb.hierarchy.atom_group. Pickle-able stand-in for af::shared<atom> array, using the atom_with_labels objects as elements. A group of one or more adjacent residues presumed to form continuous alternate conformations. Determine whether a residue_group object is part of the same continuous disordered segment. The precise meaning of this can be adjusted depending on user preferences; by default a continuous segment must have the same number of conformers for each residue, and occupancies must be constrained for each conformation. The latter assumption will probably be violated most often. Find any residue groups with heterogeneous chemical identity. Count the number of residues where not all atoms have alternates. Count the number of alternate conformations. Sometimes this may not be the same for all residue groups, in which case a list is returned. Calculate the distances between atoms in the specified pair of conformers (must be present for all residue groups). # XXX the way this is handled is somewhat clumsy, but necessary because # there is no requirement that atom groups have the same number of atoms or # even the same chemical identity (although they are assumed to be amino # acids) Find the matching validation result objects from the multi-criterion object (see mmtbx/validation/molprobity/__init__.py). #----------------------------------------------------------------------- # utility methods Calculate angles between carbonyl oxygen (C=O) bonds in each pair of atom groups, and return the maximum value (or None if fewer than two such bonds are found). Detect cases where the only alternate conformation is for the amide hydrogen, presumably because the previous residue was split and Reduce was used to add hydrogens. These residues are ignored in our analyses. # XXX unused? Count the number of conformers in a structure. # multiple MODELs aren't handled #----------------------------------------------------------------------- # TODO post-analysis # distances and RMSDs # verbose output | 1.837649 | 2 |
tree.py | andribas404/fluffy-palm-tree | 0 | 6633048 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Имплементация алгоритма Дейкстры с использованием Фибоначчиевых куч.
With zero dependencies (no import).
class FibonacciHeap - Фибоначчиева куча.
class Graph - Граф с вершинами и ребрами.
class AlgorithmDijkstra - Реализация алгоритма Дейкстры.
"""
class FibonacciHeap:
"""Фибоначчиева куча."""
class Node:
"""Heap Node."""
def __init__(self, x, key):
"""Node initialization."""
# Содержимое узла.
self.x = x
# Ключ
self.key = key
# Предок узла
self.parent = None
# Левый братский / сестринский узел
self.left = None
# Правый братский / сестринский узел
self.right = None
# Прямой потомок узла
self.child = None
# Ранг узла = кол-во прямых потомков
self.rank = 0
# Перемещались ли ранее потомки этого узла
self.marked = False
def _extract(self):
"""Удаление связей перед переносом узла."""
self.parent = None
self.left = None
self.right = None
def __repr__(self):
"""Node representation."""
return 'Node(x={})'.format(self.x)
def __init__(self, node=None):
"""
Создание новой фибоначчиевой кучи.
Время работы: O(1)
"""
self.min_node = node
def insert(self, node):
"""
Вставка узла node в список корневых узлов.
Время работы: O(1)
"""
h2 = FibonacciHeap()
h2._set_min(node)
self.meld(h2)
def _set_min(self, node):
"""
Установка минимального узла.
Время работы: O(1)
"""
self.min_node = node
def _update_min(self, node):
"""
Обновление минимального узла, если ключ меньше.
Время работы: O(1)
"""
current = self.find_min()
if not current:
self._set_min(node)
elif node and node.key <= current.key:
self._set_min(node)
def find_min(self):
"""
Поиск минимального узла.
Время работы: O(1)
"""
return self.min_node
def meld(self, h):
"""
Объединение двух фибоначчиевых куч.
Время работы: O(1)
"""
node1 = self.find_min()
node2 = h.find_min()
# Склеивание двух двусвязных списков (колец)
# x - удаляемая связь
# left1 <-x node1 -> right1
# X
# left2 <-x node2 -> right2
# Добавляемая куча пуста
if not node2:
return
# Исходная куча пуста
if not node1:
self._set_min(node2)
return
# Поскольку список двусвязный кольцевой, то если есть левый узел,
# то существует правый (равен левому или другому)
# Если в списке 1 элемент, то он не указывает сам на себя = None
left1 = node1.left
left2 = node2.left
# В исходной куче 1 корневой узел
if not left1:
if left2:
# По левому узлу второй кучи
# node1
# | |
# left2 <-x node2
node1.left = left2
node1.right = node2
left2.right = node1
node2.left = node1
else:
# В обеих кучах 1 корневой узел
# node1
# |
# node2
node1.left = node1.right = node2
node2.left = node2.right = node1
else:
# Склеиваем через левый корневой узел второй кучи
if left2:
# left1 <-x node1
# X
# left2 <-x node2
# наискосок
left1.right = node2
node1.left = left2
left2.right = node1
node2.left = left1
# Во второй куче 1 корневой узел
else:
# left1 <-x node1
# | |
# node2
node2.left = left1
node2.right = node1
left1.right = node2
node1.left = node2
# Если нужно, обновляем минимум
self._update_min(node2)
def delete_min(self):
r"""
Извлечение минимального узла.
x
/ | \
c1 c2 c3
Амортизированное время работы: O(log n)
"""
root = self.find_min()
if not root:
raise ValueError('Куча пуста')
# Устанавливаем временно минимальный узел на левый
self._set_min(root.left)
# Удаляем из списка минимальный узел
self._unlink(root)
# Создаем новую кучу из потомков root (у них прежний parent)
h = FibonacciHeap(root.child)
self.meld(h)
self._consolidate()
root._extract()
root.child = None
return root
def _unlink(self, node):
"""
Извлечение узла из двухсвязного списка.
Возвращает левый узел из оставшихся в списке, либо None
left - node - right = left - right
Время работы: O(1)
"""
left = node.left
right = node.right
# В списке 1 элемент - удаляемый
if not left:
return None
if left == right:
# В списке было 2 элемента
left.left = left.right = None
else:
left.right = right
right.left = left
return left
def _consolidate(self):
"""
Уплотнение списка корней - склеивание деревьев с одинаковым рангом.
Обновляет минимальный узел
и устанавливает parent=None для всех корневых узлов
Время работы: O(log n)
"""
# временный минимальный узел
root = self.find_min()
if not root:
return
# Словарь корневых узлов вида ранг -> узел
ranked = dict()
ranked[root.rank] = root
root.parent = None
node = root.right
while node:
# У корня нет предков
node.parent = None
# Текущий узел
melded = node
# Следующий просматриваемый узел
node = node.right
if ranked.get(node.rank, None) == node:
# Мы там уже были, поэтому эта итерация последняя
node = None
while melded.rank in ranked:
# В списке корней есть дерево с таким же рангом.
rank = melded.rank
# Склеиваем
melded = self._link(melded, ranked[rank])
# и удаляем из словаря прежний ранг
del ranked[rank]
# обновляем с новым значением ранга получившееся дерево
ranked[melded.rank] = melded
# Обновляем минимальный узел
self._update_min(melded)
def _link(self, node1, node2):
"""
Склеивание двух корней.
Корнем становится узел с меньшим ключом, второй - его потомком
Возвращает получившийся корень
Время работы: O(1)
"""
if node1.key > node2.key:
node1, node2 = node2, node1
# node1 node1
# | -> |
# child node2 - child
# node2 извлекается из списка корней
self._unlink(node2)
node2._extract()
# убирается отметка
node2.marked = False
# и он становится потомком node1
node2.parent = node1
# Обновляем ранг получившегося дерева
node1.rank += 1
# Потомок первого корня
child = node1.child
if not child:
# Если нет потомков
node1.child = node2
else:
left = child.left
if not left:
# Один потомок
# child - node2
child.left = child.right = node2
node2.left = node2.right = child
else:
# left <-x child
# | |
# node2
node2.left = left
node2.right = child
left.right = node2
child.left = node2
return node1
def decrease_key(self, node, newkey):
"""
Уменьшение ключа узла node до значения newkey.
Время работы: O(1)
"""
assert newkey < node.key
node.key = newkey
if not node.parent:
# Узел - корневой
self._update_min(node)
return
parent = node.parent
parent.rank -= 1
parent.child = self._unlink(node)
self._cascading_cut(parent)
node._extract()
self.insert(node)
def _cut(self, node):
"""
Подрезка дерева - перенос node в список корней.
Время работы: O(1)
"""
assert node is not None
parent = node.parent
if not parent:
# Узел уже корневой
return
parent.rank -= 1
parent.child = self._unlink(node)
node._extract()
self.insert(node)
def _cascading_cut(self, node):
"""
Каскадная подрезка дерева.
Начиная от узла node, и пока предшествующий узел имеет отметку
о перемещении (marked = True), все они становятся корневыми.
Время работы: O(log n)
"""
parent = node
while parent:
if not parent.marked:
parent.marked = True
return
else:
node = parent
parent = node.parent
self._cut(node)
def delete(self, node):
"""
Удаление узла node.
Амортизированное время работы: O(log n)
"""
if node == self.find_min():
# Узел - минимальный
return self.delete_min()
parent = node.parent
if not parent:
# Узел - корневой
self._unlink(node)
else:
parent.rank -= 1
parent.child = self._unlink(node)
self._cascading_cut(parent)
h = FibonacciHeap(node.child)
self.meld(h)
self._consolidate()
node._extract()
node.child = None
return node
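# Example (editor's illustrative sketch): basic use of the heap API defined above.
#   heap = FibonacciHeap()
#   heap.insert(FibonacciHeap.Node('a', key=3))
#   heap.insert(FibonacciHeap.Node('b', key=1))
#   heap.delete_min().x  # -> 'b' (the node with the smallest key)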
class Graph:
"""Граф с вершинами и ребрами."""
class Vertex:
"""Вершина графа."""
def __init__(self, x):
"""Vertex initialization."""
self.x = x
self.edges = []
def __init__(self, n, edges):
"""
Инициализация графа.
Вершины пронумерованы от 1 до n
edges - список ребер в формате [(вершина1, вершина2, вес ребра),...]
Полагаем, что веса неотрицательные
"""
# self.nodes[i] = Vertex(i+1)
self.nodes = [Graph.Vertex(x) for x in range(1, n+1)]
for v1, v2, weight in edges:
node1 = self.nodes[v1-1]
node2 = self.nodes[v2-1]
node1.edges.append((node2, weight))
node2.edges.append((node1, weight))
class AlgorithmDijkstra:
"""
Реализация алгоритма Дейкстры.
Находит кратчайший путь от заданной вершины до всех других вершин графа.
"""
class Link:
"""
Link structure.
Связывает вершину исходного графа,
соответствующий ей узел в очереди на просмотр,
текущее расстояние до нее,
а также предшествующую ей вершину в оптимальном маршруте
"""
UNLABELED = 'unlabeled'
LABELED = 'labeled'
SCANNED = 'scanned'
def __init__(self, v):
"""Link initialization."""
self.vertex = v
self.heap_node = None
self.distance = None
self.count = 0
self.pred = None
self.label = AlgorithmDijkstra.Link.UNLABELED
def __init__(self):
"""Graph initialization."""
pass
def solve(self, graph, start_ind):
"""
Находит кратчайший путь.
Находит кратчайший путь от вершины с номером start_ind до всех других
вершин графа graph.
"""
links = [AlgorithmDijkstra.Link(v) for v in graph.nodes]
heap = FibonacciHeap()
heap_node = FibonacciHeap.Node(start_ind, 0)
link_start = links[start_ind - 1]
link_start.distance = 0
link_start.count = 1
link_start.heap_node = heap_node
link_start.label = AlgorithmDijkstra.Link.LABELED
heap.insert(heap_node)
while True:
try:
# Извлекаем из очереди вершину с минимальным расстоянием до нее
heap_node = heap.delete_min()
link = links[heap_node.x - 1]
link.label = AlgorithmDijkstra.Link.SCANNED
# Проход по всем вершинам, смежных с текущей
for vertex, weight in link.vertex.edges:
# Суммарное расстояние до смежной
distance = link.distance + weight
# Индекс смежной вершины
vertex_ind = vertex.x
# Соответствующая запись в таблице связей
link_next = links[vertex_ind - 1]
if link_next.label == AlgorithmDijkstra.Link.SCANNED:
continue
if link_next.distance is None:
# Если ранее в этой вершине не были то добавляем ее
# в очередь на просмотр с ключом равным текущему
# расстоянию и сохраняем связь
heap_node = FibonacciHeap.Node(vertex_ind, distance)
heap.insert(heap_node)
link_next.heap_node = heap_node
link_next.distance = distance
link_next.pred = [link.vertex.x]
link_next.count = link.count
link_next.label = AlgorithmDijkstra.Link.LABELED
else:
# Вершина уже находится в очереди на просмотр
if distance < link_next.distance:
# и расстояние через текущую вершину короче
heap.decrease_key(link_next.heap_node, distance)
link_next.distance = distance
link_next.pred = [link.vertex.x]
link_next.count = link.count
elif distance == link_next.distance:
link_next.pred.append(link.vertex.x)
link_next.count += link.count
except ValueError:
# Конец очереди
break
return links
def find_distances(self, links, start_ind):
"""Возвращаем список расстояний до вершин."""
# Возвращаем список расстояний до вершин пропуская вершину s
# Всего (n-1) значение. Если вершина недостижима, расстояние = -1
distances = []
for link in links:
if link.vertex.x == start_ind:
continue
if link.distance is None:
distances.append(-1)
else:
distances.append(link.distance)
return distances
def find_num_of_shortest_paths(self, links, start_ind, finish_ind):
"""Возвращаем количество крайтайших расстояний."""
finish_link = links[finish_ind - 1]
return finish_link.count
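if __name__ == '__main__':
    # Example usage (editor's illustrative sketch; the graph and weights below are arbitrary).
    # Undirected weighted edges as (vertex1, vertex2, weight); vertices are numbered from 1.
    edges = [(1, 2, 1), (2, 3, 2), (1, 3, 3), (3, 4, 1)]
    graph = Graph(4, edges)
    solver = AlgorithmDijkstra()
    links = solver.solve(graph, start_ind=1)
    # Distances from vertex 1 to every other vertex (-1 means unreachable).
    print(solver.find_distances(links, start_ind=1))
    # Number of distinct shortest paths from vertex 1 to vertex 3.
    print(solver.find_num_of_shortest_paths(links, start_ind=1, finish_ind=3))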
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Имплементация алгоритма Дейкстры с использованием Фибоначчиевых куч.
With zero dependencies (no import).
class FibonacciHeap - Фибоначчиева куча.
class Graph - Граф с вершинами и ребрами.
class AlgorithmDijkstra - Реализация алгоритма Дейкстры.
"""
class FibonacciHeap:
"""Фибоначчиева куча."""
class Node:
"""Heap Node."""
def __init__(self, x, key):
"""Node initialization."""
# Содержимое узла.
self.x = x
# Ключ
self.key = key
# Предок узла
self.parent = None
# Левый братский / сестринский узел
self.left = None
# Правый братский / сестринский узел
self.right = None
# Прямой потомок узла
self.child = None
# Ранг узла = кол-во прямых потомков
self.rank = 0
# Перемещались ли ранее потомки этого узла
self.marked = False
def _extract(self):
"""Удаление связей перед переносом узла."""
self.parent = None
self.left = None
self.right = None
def __repr__(self):
"""Node representation."""
return 'Node(x={})'.format(self.x)
def __init__(self, node=None):
"""
Создание новой фибоначчиевой кучи.
Время работы: O(1)
"""
self.min_node = node
def insert(self, node):
"""
Вставка узла node в список корневых узлов.
Время работы: O(1)
"""
h2 = FibonacciHeap()
h2._set_min(node)
self.meld(h2)
def _set_min(self, node):
"""
Установка минимального узла.
Время работы: O(1)
"""
self.min_node = node
def _update_min(self, node):
"""
Обновление минимального узла, если ключ меньше.
Время работы: O(1)
"""
current = self.find_min()
if not current:
self._set_min(node)
elif node and node.key <= current.key:
self._set_min(node)
def find_min(self):
"""
Поиск минимального узла.
Время работы: O(1)
"""
return self.min_node
def meld(self, h):
"""
Объединение двух фибоначчиевых куч.
Время работы: O(1)
"""
node1 = self.find_min()
node2 = h.find_min()
# Склеивание двух двусвязных списков (колец)
# x - удаляемая связь
# left1 <-x node1 -> right1
# X
# left2 <-x node2 -> right2
# Добавляемая куча пуста
if not node2:
return
# Исходная куча пуста
if not node1:
self._set_min(node2)
return
# Поскольку список двусвязный кольцевой, то если есть левый узел,
# то существует правый (равен левому или другому)
# Если в списке 1 элемент, то он не указывает сам на себя = None
left1 = node1.left
left2 = node2.left
# В исходной куче 1 корневой узел
if not left1:
if left2:
# По левому узлу второй кучи
# node1
# | |
# left2 <-x node2
node1.left = left2
node1.right = node2
left2.right = node1
node2.left = node1
else:
# В обеих кучах 1 корневой узел
# node1
# |
# node2
node1.left = node1.right = node2
node2.left = node2.right = node1
else:
# Склеиваем через левый корневой узел второй кучи
if left2:
# left1 <-x node1
# X
# left2 <-x node2
# наискосок
left1.right = node2
node1.left = left2
left2.right = node1
node2.left = left1
# Во второй куче 1 корневой узел
else:
# left1 <-x node1
# | |
# node2
node2.left = left1
node2.right = node1
left1.right = node2
node1.left = node2
# Если нужно, обновляем минимум
self._update_min(node2)
def delete_min(self):
r"""
Извлечение минимального узла.
x
/ | \
c1 c2 c3
Амортизированное время работы: O(log n)
"""
root = self.find_min()
if not root:
raise ValueError('Куча пуста')
# Устанавливаем временно минимальный узел на левый
self._set_min(root.left)
# Удаляем из списка минимальный узел
self._unlink(root)
# Создаем новую кучу из потомков root (у них прежний parent)
h = FibonacciHeap(root.child)
self.meld(h)
self._consolidate()
root._extract()
root.child = None
return root
def _unlink(self, node):
"""
Извлечение узла из двухсвязного списка.
Возвращает левый узел из оставшихся в списке, либо None
left - node - right = left - right
Время работы: O(1)
"""
left = node.left
right = node.right
# В списке 1 элемент - удаляемый
if not left:
return None
if left == right:
# В списке было 2 элемента
left.left = left.right = None
else:
left.right = right
right.left = left
return left
def _consolidate(self):
"""
Уплотнение списка корней - склеивание деревьев с одинаковым рангом.
Обновляет минимальный узел
и устанавливает parent=None для всех корневых узлов
Время работы: O(log n)
"""
# временный минимальный узел
root = self.find_min()
if not root:
return
# Словарь корневых узлов вида ранг -> узел
ranked = dict()
ranked[root.rank] = root
root.parent = None
node = root.right
while node:
# У корня нет предков
node.parent = None
# Текущий узел
melded = node
# Следующий просматриваемый узел
node = node.right
if ranked.get(node.rank, None) == node:
# Мы там уже были, поэтому эта итерация последняя
node = None
while melded.rank in ranked:
# В списке корней есть дерево с таким же рангом.
rank = melded.rank
# Склеиваем
melded = self._link(melded, ranked[rank])
# и удаляем из словаря прежний ранг
del ranked[rank]
# обновляем с новым значением ранга получившееся дерево
ranked[melded.rank] = melded
# Обновляем минимальный узел
self._update_min(melded)
def _link(self, node1, node2):
"""
Склеивание двух корней.
Корнем становится узел с меньшим ключом, второй - его потомком
Возвращает получившийся корень
Время работы: O(1)
"""
if node1.key > node2.key:
node1, node2 = node2, node1
# node1 node1
# | -> |
# child node2 - child
# node2 извлекается из списка корней
self._unlink(node2)
node2._extract()
# убирается отметка
node2.marked = False
# и он становится потомком node1
node2.parent = node1
# Обновляем ранг получившегося дерева
node1.rank += 1
# Потомок первого корня
child = node1.child
if not child:
# Если нет потомков
node1.child = node2
else:
left = child.left
if not left:
# Один потомок
# child - node2
child.left = child.right = node2
node2.left = node2.right = child
else:
# left <-x child
# | |
# node2
node2.left = left
node2.right = child
left.right = node2
child.left = node2
return node1
def decrease_key(self, node, newkey):
"""
Уменьшение ключа узла node до значения newkey.
Время работы: O(1)
"""
assert newkey < node.key
node.key = newkey
if not node.parent:
# Узел - корневой
self._update_min(node)
return
parent = node.parent
parent.rank -= 1
parent.child = self._unlink(node)
self._cascading_cut(parent)
node._extract()
self.insert(node)
def _cut(self, node):
"""
Подрезка дерева - перенос node в список корней.
Время работы: O(1)
"""
assert node is not None
parent = node.parent
if not parent:
# Узел уже корневой
return
parent.rank -= 1
parent.child = self._unlink(node)
node._extract()
self.insert(node)
def _cascading_cut(self, node):
"""
Каскадная подрезка дерева.
Начиная от узла node, и пока предшествующий узел имеет отметку
о перемещении (marked = True), все они становятся корневыми.
Время работы: O(log n)
"""
parent = node
while parent:
if not parent.marked:
parent.marked = True
return
else:
node = parent
parent = node.parent
self._cut(node)
def delete(self, node):
"""
Удаление узла node.
Амортизированное время работы: O(log n)
"""
if node == self.find_min():
# Узел - минимальный
return self.delete_min()
parent = node.parent
if not parent:
# Узел - корневой
self._unlink(node)
else:
parent.rank -= 1
parent.child = self._unlink(node)
self._cascading_cut(parent)
h = FibonacciHeap(node.child)
self.meld(h)
self._consolidate()
node._extract()
node.child = None
return node
class Graph:
"""Граф с вершинами и ребрами."""
class Vertex:
"""Вершина графа."""
def __init__(self, x):
"""Vertex initialization."""
self.x = x
self.edges = []
def __init__(self, n, edges):
"""
Инициализация графа.
Вершины пронумерованы от 1 до n
edges - список ребер в формате [(вершина1, вершина2, вес ребра),...]
Полагаем, что веса неотрицательные
"""
# self.nodes[i] = Vertex(i+1)
self.nodes = [Graph.Vertex(x) for x in range(1, n+1)]
for v1, v2, weight in edges:
node1 = self.nodes[v1-1]
node2 = self.nodes[v2-1]
node1.edges.append((node2, weight))
node2.edges.append((node1, weight))
class AlgorithmDijkstra:
"""
Реализация алгоритма Дейкстры.
Находит кратчайший путь от заданной вершины до всех других вершин графа.
"""
class Link:
"""
Link structure.
Связывает вершину исходного графа,
соответствующий ей узел в очереди на просмотр,
текущее расстояние до нее,
а также предшествующую ей вершину в оптимальном маршруте
"""
UNLABELED = 'unlabeled'
LABELED = 'labeled'
SCANNED = 'scanned'
def __init__(self, v):
"""Link initialization."""
self.vertex = v
self.heap_node = None
self.distance = None
self.count = 0
self.pred = None
self.label = AlgorithmDijkstra.Link.UNLABELED
def __init__(self):
"""Graph initialization."""
pass
def solve(self, graph, start_ind):
"""
Находит кратчайший путь.
Находит кратчайший путь от вершины с номером start_ind до всех других
вершин графа graph.
"""
links = [AlgorithmDijkstra.Link(v) for v in graph.nodes]
heap = FibonacciHeap()
heap_node = FibonacciHeap.Node(start_ind, 0)
link_start = links[start_ind - 1]
link_start.distance = 0
link_start.count = 1
link_start.heap_node = heap_node
link_start.label = AlgorithmDijkstra.Link.LABELED
heap.insert(heap_node)
while True:
try:
# Извлекаем из очереди вершину с минимальным расстоянием до нее
heap_node = heap.delete_min()
link = links[heap_node.x - 1]
link.label = AlgorithmDijkstra.Link.SCANNED
# Проход по всем вершинам, смежных с текущей
for vertex, weight in link.vertex.edges:
# Суммарное расстояние до смежной
distance = link.distance + weight
# Индекс смежной вершины
vertex_ind = vertex.x
# Соответствующая запись в таблице связей
link_next = links[vertex_ind - 1]
if link_next.label == AlgorithmDijkstra.Link.SCANNED:
continue
if link_next.distance is None:
# Если ранее в этой вершине не были то добавляем ее
# в очередь на просмотр с ключом равным текущему
# расстоянию и сохраняем связь
heap_node = FibonacciHeap.Node(vertex_ind, distance)
heap.insert(heap_node)
link_next.heap_node = heap_node
link_next.distance = distance
link_next.pred = [link.vertex.x]
link_next.count = link.count
link_next.label = AlgorithmDijkstra.Link.LABELED
else:
# Вершина уже находится в очереди на просмотр
if distance < link_next.distance:
# и расстояние через текущую вершину короче
heap.decrease_key(link_next.heap_node, distance)
link_next.distance = distance
link_next.pred = [link.vertex.x]
link_next.count = link.count
elif distance == link_next.distance:
link_next.pred.append(link.vertex.x)
link_next.count += link.count
except ValueError:
# Конец очереди
break
return links
def find_distances(self, links, start_ind):
"""Возвращаем список расстояний до вершин."""
# Возвращаем список расстояний до вершин пропуская вершину s
# Всего (n-1) значение. Если вершина недостижима, расстояние = -1
distances = []
for link in links:
if link.vertex.x == start_ind:
continue
if link.distance is None:
distances.append(-1)
else:
distances.append(link.distance)
return distances
def find_num_of_shortest_paths(self, links, start_ind, finish_ind):
"""Возвращаем количество крайтайших расстояний."""
finish_link = links[finish_ind - 1]
return finish_link.count
| ru | 0.993883 | #!/usr/bin/env python # -*- coding: utf-8 -*- Имплементация алгоритма Дейкстры с использованием Фибоначчиевых куч. With zero dependencies (no import). class FibonacciHeap - Фибоначчиева куча. class Graph - Граф с вершинами и ребрами. class AlgorithmDijkstra - Реализация алгоритма Дейкстры. Фибоначчиева куча. Heap Node. Node initialization. # Содержимое узла. # Ключ # Предок узла # Левый братский / сестринский узел # Правый братский / сестринский узел # Прямой потомок узла # Ранг узла = кол-во прямых потомков # Перемещались ли ранее потомки этого узла Удаление связей перед переносом узла. Node representation. Создание новой фибоначчиевой кучи. Время работы: O(1) Вставка узла node в список корневых узлов. Время работы: O(1) Установка минимального узла. Время работы: O(1) Обновление минимального узла, если ключ меньше. Время работы: O(1) Поиск минимального узла. Время работы: O(1) Объединение двух фибоначчиевых куч. Время работы: O(1) # Склеивание двух двусвязных списков (колец) # x - удаляемая связь # left1 <-x node1 -> right1 # X # left2 <-x node2 -> right2 # Добавляемая куча пуста # Исходная куча пуста # Поскольку список двусвязный кольцевой, то если есть левый узел, # то существует правый (равен левому или другому) # Если в списке 1 элемент, то он не указывает сам на себя = None # В исходной куче 1 корневой узел # По левому узлу второй кучи # node1 # | | # left2 <-x node2 # В обеих кучах 1 корневой узел # node1 # | # node2 # Склеиваем через левый корневой узел второй кучи # left1 <-x node1 # X # left2 <-x node2 # наискосок # Во второй куче 1 корневой узел # left1 <-x node1 # | | # node2 # Если нужно, обновляем минимум Извлечение минимального узла. x / | \ c1 c2 c3 Амортизированное время работы: O(log n) # Устанавливаем временно минимальный узел на левый # Удаляем из списка минимальный узел # Создаем новую кучу из потомков root (у них прежний parent) Извлечение узла из двухсвязного списка. Возвращает левый узел из оставшихся в списке, либо None left - node - right = left - right Время работы: O(1) # В списке 1 элемент - удаляемый # В списке было 2 элемента Уплотнение списка корней - склеивание деревьев с одинаковым рангом. Обновляет минимальный узел и устанавливает parent=None для всех корневых узлов Время работы: O(log n) # временный минимальный узел # Словарь корневых узлов вида ранг -> узел # У корня нет предков # Текущий узел # Следующий просматриваемый узел # Мы там уже были, поэтому эта итерация последняя # В списке корней есть дерево с таким же рангом. # Склеиваем # и удаляем из словаря прежний ранг # обновляем с новым значением ранга получившееся дерево # Обновляем минимальный узел Склеивание двух корней. Корнем становится узел с меньшим ключом, второй - его потомком Возвращает получившийся корень Время работы: O(1) # node1 node1 # | -> | # child node2 - child # node2 извлекается из списка корней # убирается отметка # и он становится потомком node1 # Обновляем ранг получившегося дерева # Потомок первого корня # Если нет потомков # Один потомок # child - node2 # left <-x child # | | # node2 Уменьшение ключа узла node до значения newkey. Время работы: O(1) # Узел - корневой Подрезка дерева - перенос node в список корней. Время работы: O(1) # Узел уже корневой Каскадная подрезка дерева. Начиная от узла node, и пока предшествующий узел имеет отметку о перемещении (marked = True), все они становятся корневыми. Время работы: O(log n) Удаление узла node. Амортизированное время работы: O(log n) # Узел - минимальный # Узел - корневой Граф с вершинами и ребрами. 
Вершина графа. Vertex initialization. Инициализация графа. Вершины пронумерованы от 1 до n edges - список ребер в формате [(вершина1, вершина2, вес ребра),...] Полагаем, что веса неотрицательные # self.nodes[i] = Vertex(i+1) Реализация алгоритма Дейкстры. Находит кратчайший путь от заданной вершины до всех других вершин графа. Link structure. Связывает вершину исходного графа, соответствующий ей узел в очереди на просмотр, текущее расстояние до нее, а также предшествующую ей вершину в оптимальном маршруте Link initialization. Graph initialization. Находит кратчайший путь. Находит кратчайший путь от вершины с номером start_ind до всех других вершин графа graph. # Извлекаем из очереди вершину с минимальным расстоянием до нее # Проход по всем вершинам, смежных с текущей # Суммарное расстояние до смежной # Индекс смежной вершины # Соответствующая запись в таблице связей # Если ранее в этой вершине не были то добавляем ее # в очередь на просмотр с ключом равным текущему # расстоянию и сохраняем связь # Вершина уже находится в очереди на просмотр # и расстояние через текущую вершину короче # Конец очереди Возвращаем список расстояний до вершин. # Возвращаем список расстояний до вершин пропуская вершину s # Всего (n-1) значение. Если вершина недостижима, расстояние = -1 Возвращаем количество крайтайших расстояний. | 3.117894 | 3 |
vgg.py | brekkanegg/cram | 1 | 6633049 | <reponame>brekkanegg/cram
import tensorflow as tf
import os, sys
import numpy as np
slim = tf.contrib.slim
class VGG():
def __init__(self, config, inputs):
self.config = config
self.image_size = inputs.image_size
self.image_shape = [self.image_size, self.image_size]
self.class_num = inputs.class_num
self.model_name = "VGG16.model"
if config.saliency: # rgbs or rgb
self.x = tf.placeholder(tf.float32, shape=[None, self.image_size, self.image_size, 4], name='x')
else:
self.x = tf.placeholder(tf.float32, shape=[None, self.image_size, self.image_size, 3], name='x')
# self.y = tf.placeholder(tf.float32, shape=[None, self.class_num], name='y')
self.y = tf.placeholder(tf.int32, shape=[None], name='y')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
self.build_model()
self.build_loss_and_optimizer()
self.merge_summary()
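    # Usage sketch (editor's illustrative note; `config` and `inputs` are assumed to
    # provide the attributes referenced above, e.g. config.saliency, config.beta1,
    # inputs.image_size and inputs.class_num):
    #   model = VGG(config, inputs)
    #   sess.run(model.train_op, feed_dict={model.x: batch_x, model.y: batch_y,
    #                                       model.learning_rate: 1e-4,
    #                                       model.is_training: True})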
def build_model(self):
# input
net = self.x
print(net.shape)
if self.image_size == 224: # vgg16
filters = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]
strides = [1, 2, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 2]
gp = 7
hiddens = [4096, 4096]
if self.image_size == 32: # vgg16
filters = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]
strides = [1, 2, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 2]
gp = 1
hiddens = [4096, 4096]
# elif self.image_size == 32:
# filters = [32, 64, 64, 128, 128] #, 128, 128] #256, 256, 256, 256]
# strides = [2, 1, 2, 1, 2] #, 1, 2] #, 1, 2, 1, 2]
# gp = 4
# hiddens = [200, 200] #, 100]
with slim.arg_scope([slim.conv2d, slim.fully_connected], activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(0.001)):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.elu, updates_collections=None, is_training=self.is_training):
# vgg - conv
for i in range(len(strides)):
net = slim.conv2d(net, filters[i], 3, stride=strides[i], padding='SAME', scope='conv{}'.format(i))
net = slim.batch_norm(net, scope='bn{}'.format(i))
print(net.shape)
# vgg - fc, (=1x1 conv)
net = slim.conv2d(net, hiddens[0], gp, stride=1, padding='VALID', scope='fc{}'.format(i+1))
net = slim.batch_norm(net, scope='bn{}'.format(i + 1))
net = slim.dropout(net, keep_prob=0.25, is_training=self.is_training, scope='dropout{}'.format(i + 1))
print(net.shape)
net = slim.conv2d(net, hiddens[1], 1, stride=1, padding='SAME', scope='fc{}'.format(i+2))
net = slim.batch_norm(net, scope='bn{}'.format(i + 2))
net = slim.dropout(net, keep_prob=0.25, is_training=self.is_training, scope='dropout{}'.format(i + 2))
print(net.shape)
net = slim.conv2d(net, self.class_num, 1, stride=1, padding='SAME', scope='fc{}'.format(i + 3))
print(net.shape)
self.logits = slim.flatten(net)
print(self.logits.shape)
# vgg - fc
# net = slim.flatten(net)
# for ii in range(len(hiddens)):
# net = slim.fully_connected(net, hiddens[ii], scope='fc{}'.format(i+1+ii))
# print(net.shape)
# logits
# self.logits = slim.fully_connected(net, self.class_num, scope='logits')
                # print(self.logits.shape)
def build_loss_and_optimizer(self):
# self.cross_entropy_loss = tf.losses.softmax_cross_entropy(onehot_labels=self.y, logits=self.logits)
# self.accuracy = tf.metrics.accuracy(labels=tf.argmax(self.y, axis=1),
# predictions=tf.argmax(self.logits, axis=1))[1]
self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)
self.cross_entropy_loss = tf.reduce_mean(self.cross_entropy)
self.accuracy = tf.metrics.accuracy(labels=self.y,
predictions=tf.argmax(self.logits, axis=1))[1]
self.optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=self.config.beta1)
#fixme:
# gvs = self.optimizer.compute_gradients(self.cross_entropy_loss)
# capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gvs]
# self.train_op = self.optimizer.apply_gradients(capped_gvs)
self.train_op = self.optimizer.minimize(self.cross_entropy_loss)
def merge_summary(self):
summaries = []
summaries += [tf.summary.image("image", self.x[:, :, :, :3], max_outputs=4)]
if self.config.saliency:
self.s = tf.reshape(self.x[:, :, :, -1], shape=[-1, self.image_size, self.image_size, 1])
summaries += [tf.summary.image("saliency", self.s, max_outputs=4)]
summaries += [tf.summary.scalar("cross_entropy_loss", self.cross_entropy_loss)]
summaries += [tf.summary.scalar("accuracy", self.accuracy)]
self.summary_merge = tf.summary.merge(summaries) | import tensorflow as tf
import os, sys
import numpy as np
slim = tf.contrib.slim
class VGG():
def __init__(self, config, inputs):
self.config = config
self.image_size = inputs.image_size
self.image_shape = [self.image_size, self.image_size]
self.class_num = inputs.class_num
self.model_name = "VGG16.model"
if config.saliency: # rgbs or rgb
self.x = tf.placeholder(tf.float32, shape=[None, self.image_size, self.image_size, 4], name='x')
else:
self.x = tf.placeholder(tf.float32, shape=[None, self.image_size, self.image_size, 3], name='x')
# self.y = tf.placeholder(tf.float32, shape=[None, self.class_num], name='y')
self.y = tf.placeholder(tf.int32, shape=[None], name='y')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
self.build_model()
self.build_loss_and_optimizer()
self.merge_summary()
def build_model(self):
# input
net = self.x
print(net.shape)
if self.image_size == 224: # vgg16
filters = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]
strides = [1, 2, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 2]
gp = 7
hiddens = [4096, 4096]
if self.image_size == 32: # vgg16
filters = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]
strides = [1, 2, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 2]
gp = 1
hiddens = [4096, 4096]
# elif self.image_size == 32:
# filters = [32, 64, 64, 128, 128] #, 128, 128] #256, 256, 256, 256]
# strides = [2, 1, 2, 1, 2] #, 1, 2] #, 1, 2, 1, 2]
# gp = 4
# hiddens = [200, 200] #, 100]
with slim.arg_scope([slim.conv2d, slim.fully_connected], activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(0.001)):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.elu, updates_collections=None, is_training=self.is_training):
# vgg - conv
for i in range(len(strides)):
net = slim.conv2d(net, filters[i], 3, stride=strides[i], padding='SAME', scope='conv{}'.format(i))
net = slim.batch_norm(net, scope='bn{}'.format(i))
print(net.shape)
# vgg - fc, (=1x1 conv)
net = slim.conv2d(net, hiddens[0], gp, stride=1, padding='VALID', scope='fc{}'.format(i+1))
net = slim.batch_norm(net, scope='bn{}'.format(i + 1))
net = slim.dropout(net, keep_prob=0.25, is_training=self.is_training, scope='dropout{}'.format(i + 1))
print(net.shape)
net = slim.conv2d(net, hiddens[1], 1, stride=1, padding='SAME', scope='fc{}'.format(i+2))
net = slim.batch_norm(net, scope='bn{}'.format(i + 2))
net = slim.dropout(net, keep_prob=0.25, is_training=self.is_training, scope='dropout{}'.format(i + 2))
print(net.shape)
net = slim.conv2d(net, self.class_num, 1, stride=1, padding='SAME', scope='fc{}'.format(i + 3))
print(net.shape)
self.logits = slim.flatten(net)
print(self.logits.shape)
# vgg - fc
# net = slim.flatten(net)
# for ii in range(len(hiddens)):
# net = slim.fully_connected(net, hiddens[ii], scope='fc{}'.format(i+1+ii))
# print(net.shape)
# logits
# self.logits = slim.fully_connected(net, self.class_num, scope='logits')
                # print(self.logits.shape)
def build_loss_and_optimizer(self):
# self.cross_entropy_loss = tf.losses.softmax_cross_entropy(onehot_labels=self.y, logits=self.logits)
# self.accuracy = tf.metrics.accuracy(labels=tf.argmax(self.y, axis=1),
# predictions=tf.argmax(self.logits, axis=1))[1]
self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)
self.cross_entropy_loss = tf.reduce_mean(self.cross_entropy)
self.accuracy = tf.metrics.accuracy(labels=self.y,
predictions=tf.argmax(self.logits, axis=1))[1]
self.optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=self.config.beta1)
#fixme:
# gvs = self.optimizer.compute_gradients(self.cross_entropy_loss)
# capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gvs]
# self.train_op = self.optimizer.apply_gradients(capped_gvs)
self.train_op = self.optimizer.minimize(self.cross_entropy_loss)
def merge_summary(self):
summaries = []
summaries += [tf.summary.image("image", self.x[:, :, :, :3], max_outputs=4)]
if self.config.saliency:
self.s = tf.reshape(self.x[:, :, :, -1], shape=[-1, self.image_size, self.image_size, 1])
summaries += [tf.summary.image("saliency", self.s, max_outputs=4)]
summaries += [tf.summary.scalar("cross_entropy_loss", self.cross_entropy_loss)]
summaries += [tf.summary.scalar("accuracy", self.accuracy)]
self.summary_merge = tf.summary.merge(summaries) | en | 0.255005 | # rgbs or rgb # self.y = tf.placeholder(tf.float32, shape=[None, self.class_num], name='y') # input # vgg16 # vgg16 # elif self.image_size == 32: # filters = [32, 64, 64, 128, 128] #, 128, 128] #256, 256, 256, 256] # strides = [2, 1, 2, 1, 2] #, 1, 2] #, 1, 2, 1, 2] # gp = 4 # hiddens = [200, 200] #, 100] # vgg - conv # vgg - fc, (=1x1 conv) # vgg - fc # net = slim.flatten(net) # for ii in range(len(hiddens)): # net = slim.fully_connected(net, hiddens[ii], scope='fc{}'.format(i+1+ii)) # print(net.shape) # logits # self.logits = slim.fully_connected(net, self.class_num, scope='logits') # print(self.logits.shape)[ # self.cross_entropy_loss = tf.losses.softmax_cross_entropy(onehot_labels=self.y, logits=self.logits) # self.accuracy = tf.metrics.accuracy(labels=tf.argmax(self.y, axis=1), # predictions=tf.argmax(self.logits, axis=1))[1] #fixme: # gvs = self.optimizer.compute_gradients(self.cross_entropy_loss) # capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gvs] # self.train_op = self.optimizer.apply_gradients(capped_gvs) | 2.513392 | 3 |
hupwatch/command.py | swistakm/hupwatch | 8 | 6633050 | # -*- coding: utf-8 -*-
import atexit
import os
import time
import signal
import logging
from hupwatch.service import Service
from hupwatch.args_parser import parse_args
logger = logging.getLogger(__name__)
delayed_exit = False
def setup_logging(verbosity):
ilogger = logging.getLogger(__name__)
if verbosity:
handler = logging.StreamHandler()
if verbosity == 1:
handler.setLevel(logging.ERROR)
if verbosity == 2:
handler.setLevel(logging.WARNING)
if verbosity >= 3:
handler.setLevel(logging.DEBUG)
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
'=> HUP watch [%(levelname)-8s]: %(message)s'
)
handler.setFormatter(formatter)
ilogger.setLevel(logging.DEBUG)
ilogger.addHandler(handler)
def main():
args, command = parse_args()
setup_logging(args.verbose)
logger.info("Starting HUP watch (%s)" % os.getpid())
    # use list because Python 2 does not provide nonlocal statement
services = [Service(command)]
services[0].start()
logger.info("Child process {pid} started".format(
pid=services[0].process.pid
))
def hup_handler(*_):
logger.debug("HUP: >>>")
try:
old_service = services.pop()
except IndexError:
logger.error("HUP: Received HUP while service list is empty")
return
new_service = Service(command)
new_service.start()
logger.debug("HUP: Waiting for process ({pid}) to warm up".format(
pid=new_service.process.pid,
))
time.sleep(args.warmup_time)
if new_service.is_up():
logger.debug("HUP: Sending SIGTERM to old process ({pid})".format(
pid=old_service.process.pid,
))
old_service.process.send_signal(signal.SIGTERM)
logger.debug("HUP: Waiting for process ({pid}) to quit...".format(
pid=old_service.process.pid
))
logger.info(
"HUP: Old process quit with code: {code}".format(
code=old_service.process.wait()
)
)
services.append(new_service)
else:
# note: It may look like there is a small race condition between
# SIGHUP and SIGCHLD but sigchld_handler will check if
# current service is running so hupwatch won't quit eagerly
# note: We may think about getting rid of SIGCHLD handler anyway
# and simply poll service[0] process later in the main loop.
# This may simplify things a bit
logger.error("HUP: new process failed to start. Abort reload")
services.append(old_service)
logger.debug("HUP: <<<")
def sigchld_handler(*_):
logger.debug("CHLD: >>>")
try:
service = services.pop()
except IndexError:
logger.info("CHLD: Child process quit")
else:
if service.is_up():
logger.warning(
"CHLD: Primary child process quit, quitting"
)
exit(1)
else:
logger.info(
"CHLD: Primary process is up, continuing..."
)
services.append(service)
logger.debug("CHLD: <<<")
def term_handler(*_):
logger.debug("TERM: >>>")
try:
service = services.pop()
except IndexError:
# note: apparently we have interrupted other signal handler
# so raise alarm that will try to run this handler again
logger.info(
"TERM: TERM/ALARM received during other signal handling. Defer."
)
signal.alarm(1)
else:
if service.is_up():
if args.kill_at_exit:
logger.warning(
"TERM: Quiting with --kill-at-exit and running "
"child process. Killing it!"
)
service.kill()
else:
logger.warning(
"TERM: Quiting with running child process. "
"Doing nothing, child will be detached to new parent."
)
else:
logger.debug("Child process not up. Quiting.")
services.append(service)
logger.debug("TERM: <<<")
exit()
signal.signal(signal.SIGHUP, hup_handler)
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.signal(signal.SIGTERM, term_handler)
signal.signal(signal.SIGALRM, term_handler)
atexit.register(term_handler)
while services[0].is_up():
logger.info("Pausing for signal")
signal.pause()
if delayed_exit:
logger.info("delayed exit")
exit()
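# Editor's note (illustrative): the package is normally started through its console
# entry point and the wrapped service is reloaded by sending a plain SIGHUP to the
# hupwatch process, e.g. `kill -HUP <hupwatch pid>`. The guard below is a convenience
# sketch for running this module directly.
if __name__ == '__main__':
    main()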
| # -*- coding: utf-8 -*-
import atexit
import os
import time
import signal
import logging
from hupwatch.service import Service
from hupwatch.args_parser import parse_args
logger = logging.getLogger(__name__)
delayed_exit = False
def setup_logging(verbosity):
ilogger = logging.getLogger(__name__)
if verbosity:
handler = logging.StreamHandler()
if verbosity == 1:
handler.setLevel(logging.ERROR)
if verbosity == 2:
handler.setLevel(logging.WARNING)
if verbosity >= 3:
handler.setLevel(logging.DEBUG)
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
'=> HUP watch [%(levelname)-8s]: %(message)s'
)
handler.setFormatter(formatter)
ilogger.setLevel(logging.DEBUG)
ilogger.addHandler(handler)
def main():
args, command = parse_args()
setup_logging(args.verbose)
logger.info("Starting HUP watch (%s)" % os.getpid())
    # use list because Python 2 does not provide nonlocal statement
services = [Service(command)]
services[0].start()
logger.info("Child process {pid} started".format(
pid=services[0].process.pid
))
def hup_handler(*_):
logger.debug("HUP: >>>")
try:
old_service = services.pop()
except IndexError:
logger.error("HUP: Received HUP while service list is empty")
return
new_service = Service(command)
new_service.start()
logger.debug("HUP: Waiting for process ({pid}) to warm up".format(
pid=new_service.process.pid,
))
time.sleep(args.warmup_time)
if new_service.is_up():
logger.debug("HUP: Sending SIGTERM to old process ({pid})".format(
pid=old_service.process.pid,
))
old_service.process.send_signal(signal.SIGTERM)
logger.debug("HUP: Waiting for process ({pid}) to quit...".format(
pid=old_service.process.pid
))
logger.info(
"HUP: Old process quit with code: {code}".format(
code=old_service.process.wait()
)
)
services.append(new_service)
else:
# note: It may look like there is a small race condition between
# SIGHUP and SIGCHLD but sigchld_handler will check if
# current service is running so hupwatch won't quit eagerly
# note: We may think about getting rid of SIGCHLD handler anyway
# and simply poll service[0] process later in the main loop.
# This may simplify things a bit
logger.error("HUP: new process failed to start. Abort reload")
services.append(old_service)
logger.debug("HUP: <<<")
def sigchld_handler(*_):
logger.debug("CHLD: >>>")
try:
service = services.pop()
except IndexError:
logger.info("CHLD: Child process quit")
else:
if service.is_up():
logger.warning(
"CHLD: Primary child process quit, quitting"
)
exit(1)
else:
logger.info(
"CHLD: Primary process is up, continuing..."
)
services.append(service)
logger.debug("CHLD: <<<")
def term_handler(*_):
logger.debug("TERM: >>>")
try:
service = services.pop()
except IndexError:
# note: apparently we have interrupted other signal handler
# so raise alarm that will try to run this handler again
logger.info(
"TERM: TERM/ALARM received during other signal handling. Defer."
)
signal.alarm(1)
else:
if service.is_up():
if args.kill_at_exit:
logger.warning(
"TERM: Quiting with --kill-at-exit and running "
"child process. Killing it!"
)
service.kill()
else:
logger.warning(
"TERM: Quiting with running child process. "
"Doing nothing, child will be detached to new parent."
)
else:
logger.debug("Child process not up. Quiting.")
services.append(service)
logger.debug("TERM: <<<")
exit()
signal.signal(signal.SIGHUP, hup_handler)
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.signal(signal.SIGTERM, term_handler)
signal.signal(signal.SIGALRM, term_handler)
atexit.register(term_handler)
while services[0].is_up():
logger.info("Pausing for signal")
signal.pause()
if delayed_exit:
logger.info("delayed exit")
exit()
| en | 0.931599 | # -*- coding: utf-8 -*- # use list becasue Python 2 does not provide nonlocal statement # note: It may look like there is a small race condition between # SIGHUP and SIGCHLD but sigchld_handler will check if # current service is running so hupwatch won't quit eagerly # note: We may think about getting rid of SIGCHLD handler anyway # and simply poll service[0] process later in the main loop. # This may simplify things a bit # note: apparently we have interrupted other signal handler # so raise alarm that will try to run this handler again | 2.349403 | 2 |